blob_id (string, 40) | directory_id (string, 40) | path (string, 5-283) | content_id (string, 40) | detected_licenses (sequence, 0-41) | license_type (string, 2 classes) | repo_name (string, 7-96) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (string, 11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 43 classes) | src_encoding (string, 9 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7-5.88M) | extension (string, 30 classes) | content (string, 7-5.88M) | authors (sequence, 1) | author (string, 0-73) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
608a35b305264d22101fae93881d6336cfdbead0 | 3fb61cb2da8cad71b214a7faadffc27bcd1a5315 | /transformServices/app/gleanomatic/RSRestClient.py | c9ba11949e70f0f5f7fef4fed1d21da9d7111ea4 | [] | no_license | dhenry314/gleanomatic | 4ed0ed80836e4aa622392ec804e6ca2a95336a7b | 9c9c8ab9a6da83d4a1fc429289c7450bf606005b | refs/heads/master | 2021-07-06T02:55:43.571032 | 2019-04-15T18:21:12 | 2019-04-15T18:21:12 | 148,791,398 | 0 | 1 | null | 2018-10-04T21:35:10 | 2018-09-14T13:19:57 | Python | UTF-8 | Python | false | false | 5,442 | py | # RSRestClient - client to interact with a RSEngine REST endpoint
import urllib.request
import urllib.parse
import json
import gleanomatic.Utils as Utils
from gleanomatic.GleanomaticErrors import BadResourceURL, AddResourceError, AddDumpException
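# NOTE: TargetURIException and URIException are raised/handled further down but are
# not imported here; they are assumed to live alongside the other errors in
# gleanomatic.GleanomaticErrors.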
import gleanomatic.gleanomaticLogger as gl
logger = gl.logger
class RSRestClient:
endpointURI = None
resourceURI = None
capabilityURI = None
def __init__(self,endpointURI):
logger.info("Initializing RSRestClient")
#ensure that there is a trailing slash on the endpoint
if endpointURI[-1] != "/":
endpointURI = str(endpointURI) + "/"
self.endpointURI = endpointURI
self.resourceURI = str(self.endpointURI) + "resource"
logger.info("Checking resourceURI: " + str(self.resourceURI))
try:
Utils.checkURI(self.resourceURI)
except Exception as e:
logger.critical("ResourceURI did not validate: " + str(self.resourceURI) + " ERROR:" + str(e))
raise TargetURIException("ResourceURI did not validate: " + str(self.resourceURI) ,e)
self.capabilityURI = str(self.endpointURI) + "capability"
def getMessage(self,record):
message = Utils.getRecordAttr(record,'message')
msg = Utils.getRecordAttr(record,'msg')
if message:
return message
if msg:
return msg
return None
def addResource(self,uri,sourceNamespace,setNamespace,batchTag=None):
logger.info("Adding resource with uri: " + str(uri))
record = None
message = None
try:
Utils.checkURI(uri)
except URIException as e:
raise Exception("Resource uri did not validate. uri: " + str(uri))
params = {'sourceNamespace' : sourceNamespace, 'setNamespace' : setNamespace, 'uri': uri}
if batchTag:
params['batchTag'] = batchTag
try:
response = Utils.postRSData(self.resourceURI,params)
except Exception as e:
raise BadResourceURL("Could not add resource. resourceURI: " + str(self.resourceURI), e)
record = Utils.getJSONFromResponse(response)
message = self.getMessage(record)
if message:
logger.warning(message)
return record, message
def addDump(self,batchTag,sourceNamespace,setNamespace):
response = None
params = {'sourceNamespace' : sourceNamespace, 'setNamespace' : setNamespace, 'batchTag': batchTag}
try:
response = Utils.postRSData(self.capabilityURI,params)
except Exception as e:
raise AddDumpException("Could not post dump.",e)
d = Utils.getJSONFromResponse(response)
d = self.convertToRSDomain(d)
return d
def convertToRSDomain(self,url):
if '/static/' in str(url):
parts = str(url).split('/static/')
url = 'http://resourcesync/static/' + parts[1]
return url
def deleteResource(self,uri):
headers = {
'Content-Type': 'application/json;charset=UTF-8'
}
try:
req = urllib.request.Request(
uri,
headers=headers,
method='DELETE'
)
response = urllib.request.urlopen(req)
except urllib.error.URLError as e:
raise BadResourceURL(uri,e)
d = response.read()
return d
def getResources(self,offset=0,count=20):
url = self.endpointURI + str("resource")
url = str(url) + "?offset=" + str(offset) + "&count=" + str(count)
urlCheck = Utils.checkURI(url)
if not urlCheck:
return False
f = urllib.request.urlopen(url)
contents = Utils.getContent(url)
return contents
def getManifest(self,batchTag,sourceNamespace,setNamespace):
url = self.endpointURI + "/static/" + str(sourceNamespace) + "/" + str(setNamespace) + "/" + str(batchTag) + "/manifest"
urlCheck = Utils.checkURI(url)
if not urlCheck:
return False
contents = Utils.getContent(url)
return contents
def addCapability(self,capURL,sourceNamespace,setNamespace,capType):
logger.info("Adding capability with url:" + str(capURL))
record = None
message = None
try:
Utils.checkURI(capURL)
except Exception as e:
logger.warning("Capability URL did not validate. url: " + str(capURL) + " ERROR: " + str(e))
raise Exception("Capability URL did not validate. url: " + str(capURL) + " ERROR: " + str(e))
params = {'sourceNamespace' : sourceNamespace, 'setNamespace' : setNamespace, 'uri': capURL, 'capabilityType':capType}
try:
response = Utils.postRSData(self.capabilityURI,params)
except Exception as e:
logger.critical("Could not add capability. capabiltyURI: " + str(self.capabilityURI) + " ERROR: " + str(e))
raise BadResourceURL(str(e))
record = Utils.getJSONFromResponse(response)
message = self.getMessage(record)
if message:
logger.warning(message)
return record, message
def deleteCapability(self,capURL,sourceNamespace,setNamespace):
pass
def getCapabilities(self,**kwargs):
pass
def what(self):
print("This is a RSRestClient.")
| [
"[email protected]"
] | |
8cb15969e90d1fb204f6d134054b16e7beb7e3b6 | 806132bffbabefa5750154839e276aae8edb5570 | /src/tree.py | a5e127fb1622bbe87c05a5a6de30888c7706f35f | [] | no_license | Tarrasch/ravens-test | f5d30215b206102439f4aeb4cdd020c703b1881c | 951a72a6803745b2e5318a74ab05d56915e7ecd5 | refs/heads/master | 2020-06-01T04:20:25.901271 | 2012-11-20T12:44:46 | 2012-11-20T12:44:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | def map_tree(f, tree):
grid = tree['grid']
alts = [tree[k] for k in range(100) if tree.has_key(k)]
grid = map(lambda xs: map(lambda x: f(x), xs), grid)
alts = map(lambda x: f(x), alts)
return dict([('grid', grid)] + [(i, alts[i-1]) for i in range(1,len(alts)+1)])
def collapse_tree(tree):
grid = tree['grid']
alts = [tree[k] for k in range(100) if tree.has_key(k)]
return sum(grid, []) + alts
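# Minimal usage sketch (hypothetical 2x2 grid with two answer alternatives; the
# helpers above use dict.has_key, so this runs under Python 2):
if __name__ == '__main__':
    tree = {'grid': [[1, 2], [3, 4]], 1: 5, 2: 6}
    doubled = map_tree(lambda x: x * 2, tree)
    print doubled['grid']          # [[2, 4], [6, 8]]
    print collapse_tree(doubled)   # [2, 4, 6, 8, 10, 12]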
| [
"[email protected]"
] | |
6e3f6c3449d7f93848d1116b344a0dcabece60f2 | 6cf86e6122b3c65853231481ff73d40a25374eb1 | /Input().py | 068cf37daec039fb7b1eb52c376b116f2b161ace | [] | no_license | npc203/hackerrank-one-liners | d9480ce71cde342458689250963d1f69f3a38093 | a7eb66c8d1bfa3508cae28ff6160db2728df3b5b | refs/heads/main | 2023-07-17T12:03:03.757519 | 2021-08-20T12:30:18 | 2021-08-20T12:30:48 | 375,340,120 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | (lambda x,b : print(eval(input()) == b))(*map(int,input().split())) | [
"[email protected]"
] | |
0f1fbd0076898caca22bdff66e84b8f1ca8cade5 | d060c02ffd05cca79f469a4cd8d26827b3f3c3e4 | /job/schema.py | c15f7516218d5b5d2d4843e0874976b6f3420dba | [] | no_license | symek/job-cli | 4a2a190cf30348e5b2ca27c6c67c081599da495a | dfe2f629bd0a9956bddedd9b3d5544c3b91769d7 | refs/heads/master | 2021-01-13T04:11:46.571599 | 2020-11-17T16:11:12 | 2020-11-17T16:11:12 | 77,702,058 | 0 | 1 | null | 2017-03-03T12:59:48 | 2016-12-30T18:01:20 | Python | UTF-8 | Python | false | false | 5,852 | py |
from schematics.models import Model
from schematics.types import StringType, URLType, BooleanType, BaseType, IntType
from schematics.types import UUIDType, DateTimeType, TimestampType
from schematics.types import ListType, ModelType, DictType, PolyModelType
from schematics import exceptions
import json
import collections
class PathTemplateType(BaseType):
"""A field that stores a valid path expression value.
"""
MESSAGES = {
"path_template": "Value must be valid path template expression using @WORD/>",
}
def validate_pathtemplate(self, value):
#TODO: make it useful
if not value:
return
if not value.startswith("@"): # or not "/>" in value:
raise exceptions.ValidationError("This doesn't seem to be path template.")
def _mock(self, context=None):
return value
def to_native(self, value, context=None):
if not isinstance(value, str):
try:
value = str(value)
except (TypeError, ValueError):
raise ConversionError(self.messages['convert'].format(value))
return value
def to_primitive(self, value, context=None):
""" Shell we actually render template to final shape here?
"""
return str(value)
class PermissionModel(Model):
group = BooleanType(required=False)
others = BooleanType(required=False)
class OwnershipModel(Model):
user = StringType(required=False)
group = StringType(required=False)
class InlineOptionsModel(Model):
ownership = ModelType(OwnershipModel, required=False)
permission = ModelType(PermissionModel, required=False)
link_target = PathTemplateType(required=False)
class SchemaInlineModel(Model):
type = StringType(required=True)
name = StringType(required=True)
options = ModelType(InlineOptionsModel, required=False)
class SchemaModel(Model):
""" Basic model for all LocationTemplates except JobTemplate.
This was forced by Schamatics and has nice side effects
of controlling presence of keys on different levels
(for a price of generality and elegance).
"""
version = StringType(required=True)
names = ListType(StringType, required=True)
sub_dirs = ListType(ModelType(SchemaInlineModel), required=False)
user_dirs = BooleanType(required=False)
ownership = ModelType(OwnershipModel, required=False)
permission = ModelType(PermissionModel, required=False)
# This must be always presnt of children
# will createa own link instead of taking it from parent.
is_link = BooleanType(required=True)
link_root = StringType(required=False)
link_target = PathTemplateType(required=False)
path_template = PathTemplateType(required=False)
local_schema_path = DictType(PathTemplateType, required=False)
root = StringType(required=False)
#tmp:
log_level = IntType(required=False)
class JobSchemaModel(Model):
""" Schema model valid only for parent (job) template.
It has to be separated, so thus some important keys
like job_current counldn't be overwritten by children
templates.
"""
version = StringType(required=True)
names = ListType(StringType, required=True)
sub_dirs = ListType(ModelType(SchemaInlineModel), required=False)
job_current = StringType(required=True)
job_asset_type = StringType(required=True)
job_asset_name = StringType(required=True)
user_dirs = BooleanType(required=True)
ownership = ModelType(OwnershipModel)
permission = ModelType(PermissionModel)
is_link = BooleanType(required=True)
link_root = StringType(required=False)
link_target = PathTemplateType(required=False)
asset_id_template = ListType(StringType, required=False)
path_template = PathTemplateType(required=False)
local_schema_path = DictType(PathTemplateType, required=False)
root = StringType(required=False)
#tmp:
log_level = StringType(required=False)
class StaticDict(collections.MutableMapping):
def __init__(self, data):
self.__data = data
def __len__(self):
return len(self.__data)
def __iter__(self):
return iter(self.__data)
def __setitem__(self, k, v):
if k not in self.__data:
raise KeyError(k)
self.__data[k] = v
def __delitem__(self, k):
raise NotImplementedError
def __getitem__(self, k):
return self.__data[k]
def __contains__(self, k):
return k in self.__data
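# Illustrative behaviour of StaticDict (hypothetical values): the key set is frozen
# at construction time, existing values may be rebound, deletion is unsupported.
#   d = StaticDict({'root': '/jobs', 'is_link': False})
#   d['root'] = '/prod/jobs'   # fine: key already exists
#   d['other'] = 1             # raises KeyError
#   del d['root']              # raises NotImplementedError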
class Factory(object):
def __init__(self, log_level="INFO"):
from logger import LoggerFactory
self.logger = LoggerFactory().get_logger("SchemaFactory", log_level)
def find(self, schema, version=None, be_nice=False, verbose=False):
""" Returns to a caller (LocationTemplate subclasses mostly)
a validated models made from provided dictonary. Raise
exeption if no valid schema could be provided.
"""
# Try first Job schema, it it fails, try child model:
# This is because some fields are dengeours
# (job,group,asset ids mainly)
self.logger.debug("Using %s version", version)
# FIXME: how to make it nicely?
if "job_current" in schema:
model = JobSchemaModel(schema)
else:
model = SchemaModel(schema)
error = None
error = model.validate()
if not error:
if verbose:
print json.dumps(model.to_primitive(), indent=4)
return StaticDict(model.to_primitive())
if be_nice:
return StaticDict(schema)
return None
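# Minimal validation sketch (hypothetical field values; assumes the schematics
# package and the sibling `logger` module imported inside Factory are available):
if __name__ == '__main__':
    example = {'version': '0.1', 'names': ['render'], 'is_link': False}
    validated = Factory(log_level="INFO").find(example, verbose=True)
    # `validated` is a StaticDict of the validated primitives, or None on failure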
| [
"[email protected]"
] | |
42ee0b0d809863a628c4d9a10375863e7328db4a | fb54704d4a6f9475f42b85d8c470e3425b37dcae | /medium/ex46.py | b8f578eefedb0af0bc3a15588f48718e85d76ec0 | [] | no_license | ziyuan-shen/leetcode_algorithm_python_solution | b2784071a94b04e687fd536b57e8d5a9ec1a4c05 | 920b65db80031fad45d495431eda8d3fb4ef06e5 | refs/heads/master | 2021-06-27T05:19:47.774044 | 2021-02-04T09:47:30 | 2021-02-04T09:47:30 | 210,991,299 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
ans = {(nums[i],): nums[:i] + nums[i+1:] for i in range(len(nums))}
for _ in range(len(nums)-1):
for permute in list(ans):
remaining = ans[permute]
for i in range(len(remaining)):
ans[permute+(remaining[i],)] = remaining[:i] + remaining[i+1:]
ans.pop(permute)
return [list(permute) for permute in ans] | [
"[email protected]"
] | |
363145fc67b5fbf0353580112bd509dcb8673d4b | 41e0c435a52fdaa3d698faf17308bec2968d1d39 | /tempest/services/compute/v3/xml/keypairs_client.py | 6efb7fea04767a2d49fb6362a0d057b8f1efd00b | [
"Apache-2.0"
] | permissive | BeenzSyed/tempest | 26092eabaeb3963f1967cdbdf36395cdc5b701c3 | 7a64ee1216d844f6b99928b53f5c665b84cb8719 | refs/heads/master | 2021-01-22T08:06:59.627280 | 2015-03-23T22:36:09 | 2015-03-23T22:36:09 | 19,959,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,508 | py | # Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from tempest.common.rest_client import RestClientXML
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
from tempest.services.compute.xml.common import xml_to_json
class KeyPairsV3ClientXML(RestClientXML):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(KeyPairsV3ClientXML, self).__init__(config, username, password,
auth_url, tenant_name)
self.service = self.config.compute.catalog_v3_type
def list_keypairs(self):
resp, body = self.get("keypairs", self.headers)
node = etree.fromstring(body)
body = [{'keypair': xml_to_json(x)} for x in node.getchildren()]
return resp, body
def get_keypair(self, key_name):
resp, body = self.get("keypairs/%s" % str(key_name), self.headers)
body = xml_to_json(etree.fromstring(body))
return resp, body
def create_keypair(self, name, pub_key=None):
doc = Document()
keypair_element = Element("keypair")
if pub_key:
public_key_element = Element("public_key")
public_key_text = Text(pub_key)
public_key_element.append(public_key_text)
keypair_element.append(public_key_element)
name_element = Element("name")
name_text = Text(name)
name_element.append(name_text)
keypair_element.append(name_element)
doc.append(keypair_element)
resp, body = self.post("keypairs",
headers=self.headers, body=str(doc))
body = xml_to_json(etree.fromstring(body))
return resp, body
def delete_keypair(self, key_name):
return self.delete("keypairs/%s" % str(key_name))
| [
"[email protected]"
] | |
9b2fa84da02cc1dc0fefaa6972b0474d32d36b06 | 634425d7a7dbc99c45da4d27924af5a3e6112fca | /Linear_classifiers/(b) linear classifiers.py | 6b9abb4cd105d1c441fbf88837c0b48f0b1847fb | [] | no_license | fahimalamabir/datacamp | dc342645796c37401156bf79d7c5d20f262e4469 | 8224a69801d6a4236cf3e99bebd4f149cc846b4b | refs/heads/main | 2023-06-15T16:26:18.271836 | 2021-07-06T22:09:31 | 2021-07-06T22:09:31 | 318,082,895 | 0 | 0 | null | 2020-12-04T02:06:26 | 2020-12-03T05:15:50 | Jupyter Notebook | UTF-8 | Python | false | false | 5,107 | py | '''
How models make predictions
Which classifiers make predictions based on the sign (positive or negative) of the raw model output?
ANSWER THE QUESTION
50 XP
Possible Answers
Logistic regression only
press 1
Linear SVMs only
press 2
Neither
press 3
Both logistic regression and Linear SVMs
press 4
Submit Answer
Take Hint (-15 XP)
INCORRECT SUBMISSION
Remember, logistic regression and linear SVMs make predictions in the same way.
'''
ans = 4
"""
Changing the model coefficients
In this exercise, you will observe the effects of changing the coefficients of a linear classifer. A 2D dataset is already loaded into the environment as X and y, along with a linear classifier object model.
INSTRUCTIONS
0 XP
Explore the effects of changing the two coefficients and the intercept.
Set the coefficients and intercept so that the model makes no errors.
HINT
The first element of model.coef_ should be a negative number, and the second element should be a positive number. Remember that coef_ controls the angle of the boundary and intercept_ shifts the boundary without changing the angle.
"""
# Set the coefficients
model.coef_ = np.array([[-1,1]])
model.intercept_ = np.array([-3])
# Plot the data and decision boundary
plot_classifier(X,y,model)
# Print the number of errors
num_err = np.sum(y != model.predict(X))
print("Number of errors:", num_err)
"""
The 0-1 loss
In the figure below, what is the 0-1 loss (number of classification errors) of the classifier?
ANSWER THE QUESTION
35 XP
Possible Answers
0
press 1
1
press 2
2
press 3
3
press 4
Submit Answer
HINT
There is one red point predicted to be blue, and one blue point predicted to be red.
"""
ans = 3
"""
Minimizing a loss function
In this exercise you'll implement linear regression "from scratch" using scipy.optimize.minimize. We'll train a model on the Boston housing price data set, which is already loaded into the variables X and y. For simplicity, we won't include an intercept in our regression model.
INSTRUCTIONS
0 XP
Fill in the loss function for least squares linear regression.
Fill in the call to minimize.
Compare the coefficients to sklearn's LinearRegression.
HINT
The loss is the square of the difference between y[i] and predicted_y.
"""
# The squared error, summed over training examples
def my_loss(w):
s = 0
for i in range(y.size):
predicted_y_i = w@X[i]
s = s + (predicted_y_i - y[i])**2
return s
# Returns the w that makes my_loss(w) smallest
w_fit = minimize(my_loss, X[0]).x
print(w_fit)
# Compare with scikit-learn's LinearRegression
lr = LinearRegression(fit_intercept=False).fit(X,y)
print(lr.coef_)
"""
Classification loss functions
Which of the four loss functions makes sense for classification?
ANSWER THE QUESTION
35 XP
Possible Answers
(1)
press 1
(2)
press 2
(3)
press 3
(4)
press 4
Submit Answer
HINT
You're looking for a loss that prefers (has lower values for) correct predictions and higher values for incorrect predictions.
"""
ans = 2
"""
Comparing the logistic and hinge losses
In this exercise you'll create a plot of the logistic and hinge losses using their mathematical expressions, which are provided to you. The loss function diagram from the video is shown on the right.
INSTRUCTIONS
0 XP
Plot the logistic and hinge losses evaluated on the grid points.
HINT
Use the provided log_loss and hinge_loss functions.
"""
# Mathematical functions for logistic and hinge losses
# Feel free to ignore if you're not interested
def log_loss(raw_model_output):
return np.log(1+np.exp(-raw_model_output))
def hinge_loss(raw_model_output):
return np.maximum(0,1-raw_model_output)
# Create a grid of values and plot
grid = np.linspace(-2,2,1000)
plt.plot(grid, log_loss(grid), label='logistic')
plt.plot(grid, hinge_loss(grid), label='hinge')
plt.legend()
plt.show()
"""
Implementing logistic regression
This is very similar to the earlier exercise where you implemented linear regression "from scratch" using scipy.optimize.minimize. However, this time we'll minimize the logistic loss and compare with scikit-learn's LogisticRegression (we've set C to a large value to disable regularization; more on this in Chapter 3!). The log_loss function from the previous exercise is already defined in your environment, and the sklearn breast cancer prediction dataset (first 10 features, standardized) is loaded into the variables X and y.
INSTRUCTIONS
0 XP
Fill in the loss function for logistic regression.
Compare the coefficients to sklearn's LogisticRegression.
HINT
There are several ways to get the number of training examples, such as y.size, len(y), or len(X).
Call log_loss, which is already defined for you.
"""
# The logistic loss, summed over training examples
def my_loss(w):
s = 0
for i in range(y.size):
raw_model_output = w@X[i]
s = s + log_loss(raw_model_output * y[i])
return s
# Returns the w that makes my_loss(w) smallest
w_fit = minimize(my_loss, X[0]).x
print(w_fit)
# Compare with scikit-learn's LogisticRegression
lr = LogisticRegression(fit_intercept=False, C=1000000).fit(X,y)
print(lr.coef_)
| [
"[email protected]"
] | |
123d9fedfe67be980e7dab0084a0da66acb721fb | 0fb4b731939d1b38e219d3fcecf2990f06f17038 | /visualization/python/visualize_daily.py | eeff3a1d501a9c0fd1f6a78af36e9bbc540aef9e | [
"Apache-2.0"
] | permissive | saitejaleo/coronavirus-dashboard | 218edd7953e7e4385d9dfbd8008431aa53b6200c | f320dbdc785df954b005594b6f0bdf8330122cfb | refs/heads/master | 2022-07-18T13:59:42.728258 | 2020-05-17T18:59:20 | 2020-05-17T18:59:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,575 | py | import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
# fig_width=8
# fig_height=3
# value_text_size=9
rotation_degree=90
data_file_name = '../../data/ohio.json'
num_cases_file_name='../../figure/num_cases.svg'
num_new_cases_file_name='../../figure/num_new_cases.svg'
num_new_avg_7d_cases_file_name='../../figure/num_new_avg_7d_cases.svg' # number of average new cases in previous 7 days
# num_counties_file_name='../../figure/num_counties.svg'
num_icu_file_name='../../figure/num_icu.svg'
num_hospitalizations_file_name='../../figure/num_hospitalizations.svg'
num_death_file_name='../../figure/num_death.svg'
with open(data_file_name, 'r') as data_file:
data=json.load(data_file)
if data is not None:
daily_entries=data['daily']
else:
daily_entries=[]
df = pd.DataFrame(daily_entries)
## num of total cases
x=df['date']
y=df['num_cases']
# plt.figure(figsize=(fig_width, fig_height))
plt.title('Confirmed Cases in Ohio')
plt.plot(x, y, marker='.', markersize=12, color='red', linewidth=2, label='Total Confirmed Cases')
# plt.legend()
bottom, top = plt.ylim()
plt.ylim(0, top*1.1)
#plt.xlabel('Date')
plt.xticks(rotation=rotation_degree)
plt.gca().yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
for i,j in zip(x,y):
# plt.annotate(str(j),xy=(i,j))
# plt.text(i, j, str(j), size=value_text_size, ha='center', va='bottom')
plt.text(i, j, str(j), ha='center', va='bottom')
plt.tight_layout()
plt.savefig(num_cases_file_name)
plt.clf()
plt.cla()
plt.close()
## num of new cases
df['num_new_cases']=df['num_cases'].diff() # add newly confirmed cases
x=df['date']
z=df['num_new_cases']
plt.title('Newly Confirmed Cases in Ohio')
plt.plot(x, z, marker='.', markersize=12, color='orange', linewidth=2, label='Newly Confirmed Cases')
# plt.legend()
bottom, top = plt.ylim()
plt.ylim(0, top*1.1)
plt.xticks(rotation=rotation_degree)
plt.gca().yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
for i,j in zip(x[1:],z[1:]):
plt.text(i, j, str(int(j)), ha='center', va='bottom')
plt.tight_layout()
plt.savefig(num_new_cases_file_name)
plt.clf()
plt.cla()
plt.close()
# number of average new cases in previous 7 days
df['num_new_avg_7d_cases']=df['num_cases'].diff(7)/7 # average daily new cases over the previous 7 days
x=df['date']
z=df['num_new_avg_7d_cases']
plt.title('7-Day Average Newly Confirmed Cases in Ohio')
plt.plot(x, z, marker='.', markersize=12, color='gold', linewidth=2,label='Avg Daily New in Last 7 Days')
# plt.legend()
bottom, top = plt.ylim()
plt.ylim(0, top*1.1)
plt.xticks(rotation=rotation_degree)
plt.gca().yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
for i,j in zip(x[8:],z[8:]):
plt.text(i, j, str(int(j)), ha='center', va='bottom')
plt.tight_layout()
plt.savefig(num_new_avg_7d_cases_file_name)
plt.clf()
plt.cla()
plt.close()
## num of total icu counts
y=df['num_icu'].dropna() # delete NaN entries
x=df['date'][len(df['date'])-len(y):] # subarray of dates with num_icu available
# plt.figure(figsize=(fig_width, fig_height))
plt.title('Number of ICU admissions')
plt.plot(x, y, marker='.', markersize=12, color='tan', linewidth=2)
bottom, top = plt.ylim()
plt.ylim(0, top*1.1)
#plt.xlabel('Date')
plt.xticks(rotation=rotation_degree)
plt.gca().yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
for i,j in zip(x,y):
# plt.annotate(str(j),xy=(i,j))
plt.text(i, j, str(int(j)), ha='center', va='bottom')
plt.tight_layout()
plt.savefig(num_icu_file_name)
plt.clf()
plt.cla()
plt.close()
x=df['date']
y=df['num_hospitalizations']
# plt.figure(figsize=(fig_width, fig_height))
plt.title('Number of Hospitalizations in Ohio')
plt.plot(x, y, marker='.', markersize=12, color='olive', linewidth=2)
bottom, top = plt.ylim()
plt.ylim(0, top*1.1)
plt.xticks(rotation=rotation_degree)
plt.gca().yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
for i,j in zip(x,y):
plt.text(i, j, str(j), ha='center', va='bottom')
plt.tight_layout()
plt.savefig(num_hospitalizations_file_name)
plt.clf()
plt.cla()
plt.close()
x=df['date']
y=df['num_death']
# plt.figure(figsize=(fig_width, fig_height))
plt.plot(x, y, marker='^', color='grey', linewidth=2)
plt.title('Number of Deaths')
bottom, top = plt.ylim()
plt.ylim(0, top*1.1)
plt.xticks(rotation=rotation_degree)
plt.gca().yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
for i,j in zip(x,y):
plt.text(i, j, str(j), ha='center', va='bottom')
plt.tight_layout()
plt.savefig(num_death_file_name)
plt.clf()
plt.cla()
plt.close()
| [
"[email protected]"
] | |
27bb8d83d6f836d2e2dc817c8c5c463d2a65544f | d030f5b120dbbb62f95fd964bde379637846de06 | /ROSservice/catkin_ws/devel/lib/python2.7/dist-packages/face1/srv/__init__.py | 3337d4fca6327faac64425e4c087ba544643def5 | [] | no_license | cd74/ROSface | 29a40d16dd75f564cd24d15f01349d8b4a4e491e | 1506120c2bb9c13935849f2e4698ef1add551561 | refs/heads/master | 2022-04-25T01:33:49.269936 | 2020-04-27T06:15:38 | 2020-04-27T06:15:38 | 258,939,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | from ._FaceSrv1 import *
| [
"[email protected]"
] | |
114b4c677ac9ad89733f6b700813000b37a0f4b4 | 7f7fc72cf2f2f06ef7eb5d852d0bd2caf3f2daf9 | /sirepo/runner.py | 6b4f1b20aa57674dc9faee958a025c976818c02e | [
"Apache-2.0"
] | permissive | kalebswartz7/sirepo | 4bcd41113ba93a3f7bcfa47df27e79805e1e4f50 | 8d1f2b3914cf9622eaae6b0bf32e23e38e4e5972 | refs/heads/master | 2020-03-19T08:31:41.409642 | 2018-07-20T19:32:48 | 2018-07-20T19:32:48 | 136,211,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,268 | py | # -*- coding: utf-8 -*-
u"""Run jobs
:copyright: Copyright (c) 2016 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
decouple so can start any type of job
add is_background_import to simulation_db
select docker for that if configured and not background
need to have hard constraints on the docker container
runner.init_job() does the dispatch
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcli
from pykern import pkcollections
from pykern import pkconfig
from pykern import pkio
from pykern import pkjinja
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp
from sirepo import simulation_db
from sirepo.template import template_common
import aenum
import errno
import os
import pwd
import signal
import subprocess
import sys
import threading
import time
import uuid
#: Configuration
cfg = None
# Map of jid to instance
_job_map = pkcollections.Dict()
_job_map_lock = threading.RLock()
class State(aenum.UniqueEnum):
INIT = 1
START = 2
KILL = 3
RUN = 4
STOP = 5
# how long to wait before assuming thread that created
# job is dead.
_INIT_TOO_LONG_SECS = 5
# time expected between created and running
_DOCKER_CREATED_TOO_LONG_SECS = _INIT_TOO_LONG_SECS
# how long to wait after first kill (TERM) to second kill (KILL)
_KILL_TIMEOUT_SECS = 3
# prefix all report names
_DOCKER_CONTAINER_PREFIX = 'srjob-'
_MAX_OPEN_FILES = 1024
@pkconfig.parse_none
def cfg_job_class(value):
"""Return job queue class based on name
Args:
value (object): May be class or str.
Returns:
object: `Background` or `Celery` class.
"""
if isinstance(value, type) and issubclass(value, (Celery, Background)):
# Already initialized but may call initializer with original object
return value
if value == 'Celery':
if pkconfig.channel_in('dev'):
_assert_celery()
return Celery
elif value == 'Docker':
return Docker
elif value == 'Background':
signal.signal(signal.SIGCHLD, Background._sigchld_handler)
return Background
elif value is None:
return None
else:
raise AssertionError('{}: unknown job_class'.format(value))
def init(app, uwsgi):
"""Initialize module"""
if cfg.job_class is None:
from sirepo import server
d = 'Background'
if server.cfg.job_queue:
# Handle deprecated case
d = server.cfg.job_queue
cfg.job_class = cfg_job_class(d)
assert not uwsgi or not issubclass(cfg.job_class, Background), \
'uwsgi does not work if sirepo.runner.cfg.job_class=Background'
def job_is_processing(jid):
with _job_map_lock:
try:
job = _job_map[jid]
except KeyError:
return False
return job.is_processing()
def job_kill(jid):
"""Terminate job
Args:
jid (str): see `simulation_db.job_id`
"""
with _job_map_lock:
try:
job = _job_map[jid]
except KeyError:
return
job.kill()
def job_race_condition_reap(jid):
return job_kill(jid)
def job_start(data):
with _job_map_lock:
jid = simulation_db.job_id(data)
if jid in _job_map:
#TODO(robnagler) assumes external check of is_processing,
# which server._simulation_run_status does do, but this
# could be cleaner. Really want a reliable daemon thread
# to manage all this.
raise Collision(jid)
job = cfg.job_class(jid, data)
_job_map[jid] = job
job.start()
class Base(object):
"""Super of all job classes"""
def __init__(self, jid, data):
self.data = data
self.jid = jid
self.lock = threading.RLock()
self.set_state(State.INIT)
def is_processing(self):
with self.lock:
if self.state == State.RUN:
if self._is_processing():
return True
elif self.state == State.INIT:
if time.time() < self.state_changed + _INIT_TOO_LONG_SECS:
return True
else:
assert self.state in (State.START, State.KILL, State.STOP), \
'{}: invalid state for jid='.format(self.state, self.jid)
# reap the process in a non-running state
self.kill()
return False
def kill(self):
with self.lock:
if self.state in (State.RUN, State.START, State.KILL):
# normal case (RUN) or thread died while trying to kill job
self._kill()
elif not self.state in (State.INIT, State.STOP):
raise AssertionError(
'{}: invalid state for jid='.format(self.state, self.jid),
)
self.set_state(State.STOP)
with _job_map_lock:
try:
if self == _job_map[self.jid]:
del _job_map[self.jid]
except KeyError:
# stopped and no longer in map
return
def set_state(self, state):
self.state = state
self.state_changed = time.time()
def start(self):
with self.lock:
if self.state == State.STOP:
# Something killed between INIT and START so don't start
return
elif self.state in (State.KILL, State.RUN):
# normal case (RUN) or race condition on start/kill
# with a thread that died while trying to kill this
# job before it was started. Have to finish the KILL.
self.kill()
return
else:
# race condition that doesn't seem possible
assert self.state == State.INIT, \
'{}: unexpected state for jid={}'.format(self.state, self.jid)
self.set_state(State.START)
self.cmd, self.run_dir = simulation_db.prepare_simulation(self.data)
self._start()
self.set_state(State.RUN)
class Background(Base):
"""Run as subprocess"""
def _is_processing(self):
try:
os.kill(self.pid, 0)
except OSError:
self.pid = 0
return False
return True
def _kill(self):
if self.pid == 0:
return
pid = self.pid
for sig in (signal.SIGTERM, signal.SIGKILL):
try:
pkdlog('{}: kill {} pid={}', self.jid, sig, self.pid)
os.kill(self.pid, sig)
for j in range(_KILL_TIMEOUT_SECS):
time.sleep(1)
pid, status = os.waitpid(self.pid, os.WNOHANG)
if pid != 0:
break
else:
continue
if pid == self.pid:
pkdlog('{}: waitpid: status={}', pid, status)
self.pid = 0
break
else:
pkdlog(
'pid={} status={}: unexpected waitpid result; job={} pid={}',
pid,
status,
self.jid,
self.pid,
)
except OSError as e:
if not e.errno in (errno.ESRCH, errno.ECHILD):
raise
# reaped by _sigchld_handler()
return
@classmethod
def _sigchld_handler(cls, signum=None, frame=None):
try:
with _job_map_lock:
if not _job_map:
# Can't be our job so don't waitpid.
# Only important at startup, when other modules
# are doing popens, which does a waitpid.
# see radiasoft/sirepo#681
return
pid, status = os.waitpid(-1, os.WNOHANG)
if pid == 0:
# a process that was reaped before sigchld called
return
for self in _job_map.values():
# state of 'pid' is unknown since outside self.lock
if isinstance(self, Background) and getattr(self, 'pid', 0) == pid:
pkdlog('{}: waitpid pid={} status={}', self.jid, pid, status)
break
else:
pkdlog('pid={} status={}: unexpected waitpid', pid, status)
return
with self.lock:
self.pid = 0
self.kill()
except OSError as e:
            if not e.errno in (errno.ESRCH, errno.ECHILD):
pkdlog('waitpid: OSError: {} errno={}', e.strerror, e.errno)
def _start(self):
"""Detach a process from the controlling terminal and run it in the
background as a daemon.
We don't use pksubprocess. This method is not called from the MainThread
so can't set signals.
"""
try:
pid = os.fork()
except OSError as e:
pkdlog('{}: fork OSError: {} errno={}', self.jid, e.strerror, e.errno)
            raise
if pid != 0:
pkdlog('{}: started: pid={} cmd={}', self.jid, pid, self.cmd)
self.pid = pid
return
try:
os.chdir(str(self.run_dir))
#Don't os.setsid() so signals propagate properly
import resource
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = _MAX_OPEN_FILES
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError:
pass
sys.stdin = open(template_common.RUN_LOG, 'a+')
assert sys.stdin.fileno() == 0
os.dup2(0, 1)
sys.stdout = os.fdopen(1, 'a+')
os.dup2(0, 2)
sys.stderr = os.fdopen(2, 'a+')
pkdlog('{}: child will exec: {}', self.jid, self.cmd)
sys.stderr.flush()
try:
simulation_db.write_status('running', self.run_dir)
os.execvp(self.cmd[0], self.cmd)
            except OSError as e:
                pkdlog('{}: execvp error: {} errno={}', self.jid, e.strerror, e.errno)
            finally:
                sys.exit(1)
except BaseException as e:
with open(str(self.run_dir.join(template_common.RUN_LOG)), 'a') as f:
f.write('{}: error starting simulation: {}'.format(self.jid, e))
raise
class Celery(Base):
"""Run job in Celery (prod)"""
def _is_processing(self):
"""Job is either in the queue or running"""
res = getattr(self, 'async_result', None)
return res and not res.ready()
def _kill(self):
from celery.exceptions import TimeoutError
if not self._is_processing():
return False
res = self.async_result
tid = getattr(res, 'task_id', None)
pkdlog('{}: kill SIGTERM tid={}', self.jid, tid)
try:
res.revoke(terminate=True, wait=True, timeout=_KILL_TIMEOUT_SECS, signal='SIGTERM')
except TimeoutError as e:
pkdlog('{}: kill SIGKILL tid={}', self.jid, tid)
res.revoke(terminate=True, signal='SIGKILL')
def _start(self):
"""Detach a process from the controlling terminal and run it in the
background as a daemon.
"""
from sirepo import celery_tasks
self.celery_queue = simulation_db.celery_queue(self.data)
self.async_result = celery_tasks.start_simulation.apply_async(
args=[self.cmd, str(self.run_dir)],
queue=self.celery_queue,
)
pkdc(
'{}: started tid={} dir={} queue={} len_jobs={}',
self.jid,
self.async_result.task_id,
self.run_dir,
self.celery_queue,
len(_job_map),
)
class Collision(Exception):
"""Avoid using a mutex"""
pass
class Docker(Base):
"""Run a code in docker"""
def _is_processing(self):
"""Inspect container to see if still in running state"""
out = self.__docker(['inspect', '--format={{.State.Status}}', self.cid])
if not out:
self.cid = None
return False
if out == 'running':
return True
if out == 'created':
return time.time() < self.state_changed + _DOCKER_CREATED_TOO_LONG_SECS
return False
def _kill(self):
if self.cid:
pkdlog('{}: stop cid={}', self.jid, self.cid)
self.__docker(['stop', '--time={}'.format(_KILL_TIMEOUT_SECS), self.cid])
self.cid = None
def _start(self):
"""Detach a process from the controlling terminal and run it in the
background as a daemon.
"""
#POSIT: jid is valid docker name (word chars and dash)
self.cname = _DOCKER_CONTAINER_PREFIX + self.jid
ctx = pkcollections.Dict(
kill_secs=_KILL_TIMEOUT_SECS,
run_dir=self.run_dir,
run_log=self.run_dir.join(template_common.RUN_LOG),
run_secs=self.__run_secs(),
sh_cmd=self.__sh_cmd(),
)
script = str(self.run_dir.join(_DOCKER_CONTAINER_PREFIX + 'run.sh'))
with open(str(script), 'wb') as f:
f.write(pkjinja.render_resource('runner/docker.sh', ctx))
cmd = [
'run',
#TODO(robnagler) configurable
'--cpus=1',
'--detach',
'--init',
'--log-driver=json-file',
# never should be large, just for output of the monitor
'--log-opt=max-size=1m',
'--memory=1g',
'--name=' + self.cname,
'--network=none',
'--rm',
'--ulimit=core=0',
#TODO(robnagler) this doesn't do anything
# '--ulimit=cpu=1',
'--ulimit=nofile={}'.format(_MAX_OPEN_FILES),
'--user=' + pwd.getpwuid(os.getuid()).pw_name,
] + self.__volumes() + [
#TODO(robnagler) make this configurable per code (would be structured)
self.__image(),
'bash',
script,
]
self.cid = self.__docker(cmd)
pkdc(
'{}: started cname={} cid={} dir={} len_jobs={} cmd={}',
self.jid,
self.cname,
self.cid,
self.run_dir,
len(_job_map),
' '.join(cmd),
)
def __docker(self, cmd):
cmd = ['docker'] + cmd
try:
pkdc('Running: {}', ' '.join(cmd))
return subprocess.check_output(
cmd,
stdin=open(os.devnull),
stderr=subprocess.STDOUT,
).rstrip()
except subprocess.CalledProcessError as e:
pkdlog('{}: failed: exit={} output={}', cmd, e.returncode, e.output)
return None
def __image(self):
res = cfg.docker_image
if ':' in res:
return res
return res + ':' + pkconfig.cfg.channel
def __run_secs(self):
if self.data['report'] == 'backgroundImport':
return cfg.import_secs
if simulation_db.is_parallel(self.data):
return cfg.parallel_secs
return cfg.sequential_secs
def __sh_cmd(self):
"""Convert ``self.cmd`` into a bash cmd"""
res = []
for c in self.cmd:
assert not "'" in c, \
'{}: sh_cmd contains a single quote'.format(cmd)
res.append("'{}'".format(c))
return ' '.join(res)
def __volumes(self):
res = []
def _res(src, tgt):
res.append('--volume={}:{}'.format(src, tgt))
if pkconfig.channel_in('dev'):
for v in '~/src', '~/.pyenv':
v = pkio.py_path('~/src')
# pyenv and src shouldn't be writable, only rundir
_res(v, v + ':ro')
_res(self.run_dir, self.run_dir)
return res
def _assert_celery():
"""Verify celery & rabbit are running"""
from sirepo import celery_tasks
import time
for x in range(10):
err = None
try:
if not celery_tasks.celery.control.ping():
err = 'You need to start Celery:\nsirepo service celery'
except Exception:
err = 'You need to start Rabbit:\nsirepo service rabbitmq'
# Rabbit doesn't have a long timeout, but celery ping does
time.sleep(.5)
if not err:
return
#TODO(robnagler) really should be pkconfig.Error() or something else
# but this prints a nice message. Don't call sys.exit, not nice
pkcli.command_error(err)
cfg = pkconfig.init(
docker_image=('radiasoft/sirepo', str, 'docker image to run all jobs'),
import_secs=(10, int, 'maximum runtime of backgroundImport'),
# default is set in init(), because of server.cfg.job_gueue
job_class=(None, cfg_job_class, 'how to run jobs: Celery or Background'),
parallel_secs=(3600, int, 'maximum runtime of serial job'),
sequential_secs=(300, int, 'maximum runtime of serial job'),
)
| [
"[email protected]"
] | |
a725500a2e349cbf8954f750c637d836326569bd | 6e485ffa2ce9559fdcc05f3650483c49c46b7102 | /db/note_model.py | 75c2f2557a1deed01b6a49192e6fd6930d664882 | [] | no_license | araneta/Tugas-statistik | 23164ec3f096903675be4dcdb76c1f6ad1cb6a81 | 573376a3d174d9b53428d21e221a3338ee392e67 | refs/heads/master | 2021-01-19T18:58:19.678755 | 2017-04-16T04:26:18 | 2017-04-16T04:26:18 | 88,391,000 | 0 | 0 | null | 2017-04-16T04:28:48 | 2017-04-16T04:28:48 | null | UTF-8 | Python | false | false | 2,631 | py | from database import *
import datetime
class NoteModel:
def __init__(self):
self.db = db
self.cursor = cursor
def all_post(self):
sql = "select * from note where id_user = 1 order by tanggal_diperbaharui desc"
try :
self.cursor.execute(sql)
temp_results = self.cursor.fetchall()
results = []
i = 1
for row in temp_results:
                num = i
                results.append({'num':num, 'id_note':row[0], 'judul':row[2], 'tag':row[3],'isi':row[4], 'tanggal_dibuat':row[5], 'tanggal_diubah':row[6]})
                i += 1
return results
except:
print "Error : Gak kenek dobol"
def get_post_by_id(self, id_note):
sql = "select * from note where id_user = 1 and id_note = %d order by tanggal_diperbaharui desc" % (id_note)
try :
self.cursor.execute(sql)
row = self.cursor.fetchone()
results = {}
results.update({'id_note':row[0], 'judul':row[2], 'tag':row[3], 'isi':row[4], 'tanggal_dibuat':row[5], 'tanggal_diubah':row[6]})
return results
except:
print "Error : tidak bisa mengambil data"
def insert_post(self, judul, tag, isi):
temp_tanggal = datetime.datetime.now()
sql = "insert into note (id_user, judul, tag, isi, tanggal_dibuat, tanggal_diperbaharui) values (%d, '%s', '%s', '%s', '%s', '%s')" % (1, judul, tag, isi, temp_tanggal.strftime('%Y-%m-%d %H-%M-%S'), temp_tanggal.strftime('%Y-%m-%d %H-%M-%S'))
try:
self.cursor.execute(sql)
db.commit()
print "Info : data berhasil diisikan.."
except:
db.rollback()
print "Error : pengisian data gagal.."
def update_post(self, judul, tag, isi, id_note):
temp_tanggal = datetime.datetime.now()
sql = "update note set judul='%s', tag='%s', isi='%s', tanggal_diperbaharui='%s' where id_note=%d" %(judul, tag, isi, temp_tanggal.strftime('%Y-%m-%d %H-%M-%S'), int(id_note))
try:
self.cursor.execute(sql)
db.commit()
print "Info : data berhasil diubah.."
except:
db.rollback()
print "Error : pengubahan data gagal.."
def delete_post(self, id_note):
sql = "delete from note where id_note=%d" % (int(id_note))
try:
self.cursor.execute(sql)
db.commit()
print "Info : data berhasil dihapus.."
except:
db.rollback()
print "Error : penghapusan data gagal.."
def get_post_by_tag(self, tag):
sql = "select * from note where tag like '%%%s%%'" % (tag)
try :
self.cursor.execute(sql)
temp_results = self.cursor.fetchall()
results = []
i = 1
for row in temp_results:
num = i
results.append({'num':num, 'id_note':row[0], 'judul':row[2], 'tag':row[3], 'isi':row[4], 'tanggal_dibuat':row[5], 'tanggal_diubah':row[6]})
return results
except:
print "Error : Gak iso njupuk data"
| [
"[email protected]"
] | |
b1fc4028b00d66db57ef3d4fca7602a0b3de1815 | 8eb0f65096f9a9fe90a88c85dcdcaf12f9a8a512 | /apps/maintenance_mode/middleware.py | cd0e09b3e8aba28e3af198050f46e5958f5de4a4 | [
"MIT"
] | permissive | WhitespaceCrew/django-htk | 57c8cc9ec30b4cd9511b717978758c47144de76f | 6a7b87a3d0a2e4cb51f6b8059708a26fa8e613df | refs/heads/master | 2020-12-31T01:10:14.900413 | 2016-02-03T19:24:27 | 2016-02-03T19:25:02 | 45,211,442 | 0 | 0 | null | 2015-10-29T21:23:54 | 2015-10-29T21:23:54 | null | UTF-8 | Python | false | false | 873 | py | from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from htk.apps.maintenance_mode.utils import is_maintenance_mode
from htk.utils import htk_setting
class MaintenanceModeMiddleware(object):
"""Checks whether HTK_MAINTENANCE_MODE is set
If so, redirects to the HTK_MAINTENANCE_MODE_URL_NAME page
"""
def process_request(self, request):
maintenance_mode_page = reverse(htk_setting('HTK_MAINTENANCE_MODE_URL_NAME'))
response = None
if request.path == maintenance_mode_page:
if not is_maintenance_mode():
response = redirect('/')
else:
# already here
pass
else:
if is_maintenance_mode():
response = redirect(maintenance_mode_page)
else:
pass
return response
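# Wiring sketch (hypothetical settings entry; the exact setting name, MIDDLEWARE or
# MIDDLEWARE_CLASSES, depends on the Django version the project targets):
#   MIDDLEWARE_CLASSES += (
#       'htk.apps.maintenance_mode.middleware.MaintenanceModeMiddleware',
#   )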
| [
"[email protected]"
] | |
71ff48d27a98e522cb1183c1508f3fd16ee598fa | 521a5abf021aff0e5bec6e4723efb2d95bc1c528 | /dva/urls.py | f5d1f059a314c2edc9fa63fd0894759abd496b16 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | CVML/DeepVideoAnalytics | be3ed41968a140328e25c22f2cb2be431a2b172d | d0969b503ed68dc9ee26279c341e1540bfefd4f6 | refs/heads/master | 2021-07-17T22:19:20.787228 | 2017-10-22T07:55:04 | 2017-10-22T07:55:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | """dva URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
admin.autodiscover()
urlpatterns = [url(r'^admin/', admin.site.urls),
url(r'^api/', include('dvaapp.urls')),
url(r'', include('dvaui.urls'))]+\
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DVA_PRIVATE_ENABLE:
urlpatterns.append(url(r'^apps/', include('dvap.urls')))
if settings.DEBUG and settings.MACOS:
import debug_toolbar
urlpatterns = [url(r'^__debug__/', include(debug_toolbar.urls)),] + urlpatterns
| [
"[email protected]"
] | |
b6b7520917496dbd41f7f57d11d8d68f84434ff7 | ee179dd9e9b24046508b11a60612da3758c7e122 | /lib/python2.7/site-packages/nltk/stem/api.py | c6032423e84a9a5b8a1985afcf341e4084970792 | [] | no_license | buhtigexa/Nerit | fcd6cb08a0935e5b80392ae2acf68ba52ee8a899 | d55629f6289c1fa6efe60802a78b79932ff248a2 | refs/heads/master | 2021-01-21T13:11:51.105930 | 2015-05-01T23:56:02 | 2015-05-01T23:56:02 | 34,728,820 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | # Natural Language Toolkit: Stemmer Interface
#
# Copyright (C) 2001-2014 NLTK Project
# Author: Trevor Cohn <[email protected]>
# Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
class StemmerI(object):
"""
A processing interface for removing morphological affixes from
words. This process is known as stemming.
"""
def stem(self, token):
"""
Strip affixes from the token and return the stem.
:param token: The token that should be stemmed.
:type token: str
"""
raise NotImplementedError()
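# A minimal illustrative subclass (hypothetical, not shipped with NLTK): it strips a
# few common English suffixes to show how the interface is meant to be filled in.
class SuffixStripper(StemmerI):
    def stem(self, token):
        for suffix in ('ing', 'ed', 's'):
            if token.endswith(suffix) and len(token) > len(suffix) + 2:
                return token[:-len(suffix)]
        return token
# SuffixStripper().stem('running') returns 'runn'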
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| [
"[email protected]"
] | |
528b509d132ef0c5142d6ca4858054d9bbee1dec | d12e1825321c6d275ec55f869311d2f3e30cfdab | /libs/deep_models/flow/lite_flow_net/lite_flow_net.py | 66fbad1997c550b7348257c8d40da8a04febe6f2 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Huangying-Zhan/DF-VO | b369e6a00ae0745288acf9d61460f633aa116875 | 50e6ffa9b5164a0dfb34d3215e86cc2288df256d | refs/heads/master | 2022-12-11T18:29:42.457460 | 2022-03-14T00:34:29 | 2022-03-14T00:34:29 | 212,502,276 | 494 | 128 | MIT | 2022-11-22T15:29:20 | 2019-10-03T05:18:35 | Python | UTF-8 | Python | false | false | 17,960 | py | import math
import torch
from . import correlation # the custom cost volume layer
Backward_tensorGrid = {}
def Backward(tensorInput, tensorFlow):
"""Backward warping, warp tensorInput according to the grid defined by tensorFlow
Args:
tensorInput (tensor): source data
tensorFlow (tensor): flow data
Returns:
tensorOutput (tensor): warped data
"""
if str(tensorFlow.size()) not in Backward_tensorGrid:
tensorHorizontal = torch.linspace(-1.0, 1.0, tensorFlow.size(3)).view(1, 1, 1, tensorFlow.size(3)).expand(tensorFlow.size(0), -1, tensorFlow.size(2), -1)
tensorVertical = torch.linspace(-1.0, 1.0, tensorFlow.size(2)).view(1, 1, tensorFlow.size(2), 1).expand(tensorFlow.size(0), -1, -1, tensorFlow.size(3))
Backward_tensorGrid[str(tensorFlow.size())] = torch.cat([ tensorHorizontal, tensorVertical ], 1).cuda()
tensorFlow = torch.cat([ tensorFlow[:, 0:1, :, :] / ((tensorInput.size(3) - 1.0) / 2.0), tensorFlow[:, 1:2, :, :] / ((tensorInput.size(2) - 1.0) / 2.0) ], 1)
return torch.nn.functional.grid_sample(input=tensorInput, grid=(Backward_tensorGrid[str(tensorFlow.size())] + tensorFlow).permute(0, 2, 3, 1), mode='bilinear', padding_mode='zeros')
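# Rough usage sketch (hypothetical tensors; needs a CUDA device because the sampling
# grid above is created with .cuda(), so shown as comments only):
#   image  = torch.rand(1, 3, 64, 64).cuda()    # frame to be warped (e.g. frame t+1)
#   flow   = torch.zeros(1, 2, 64, 64).cuda()   # per-pixel displacement in pixels
#   warped = Backward(tensorInput=image, tensorFlow=flow)
#   # `warped` has the same shape as `image`; zero flow is (approximately) an identity warp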
class LiteFlowNet(torch.nn.Module):
def __init__(self):
super(LiteFlowNet, self).__init__()
class Features(torch.nn.Module):
def __init__(self):
super(Features, self).__init__()
self.moduleOne = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=7, stride=1, padding=3),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.moduleTwo = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.moduleThr = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.moduleFou = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=64, out_channels=96, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.moduleFiv = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.moduleSix = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=128, out_channels=192, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
def forward(self, tensorInput):
tensorOne = self.moduleOne(tensorInput)
tensorTwo = self.moduleTwo(tensorOne)
tensorThr = self.moduleThr(tensorTwo)
tensorFou = self.moduleFou(tensorThr)
tensorFiv = self.moduleFiv(tensorFou)
tensorSix = self.moduleSix(tensorFiv)
return [ tensorOne, tensorTwo, tensorThr, tensorFou, tensorFiv, tensorSix ]
class Matching(torch.nn.Module):
def __init__(self, intLevel):
super(Matching, self).__init__()
self.dblBackward = [ 0.0, 0.0, 10.0, 5.0, 2.5, 1.25, 0.625 ][intLevel]
if intLevel != 2:
self.moduleFeat = torch.nn.Sequential()
elif intLevel == 2:
self.moduleFeat = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=1, stride=1, padding=0),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
if intLevel == 6:
self.moduleUpflow = None
elif intLevel != 6:
self.moduleUpflow = torch.nn.ConvTranspose2d(in_channels=2, out_channels=2, kernel_size=4, stride=2, padding=1, bias=False, groups=2)
if intLevel >= 4:
self.moduleUpcorr = None
elif intLevel < 4:
self.moduleUpcorr = torch.nn.ConvTranspose2d(in_channels=49, out_channels=49, kernel_size=4, stride=2, padding=1, bias=False, groups=49)
self.moduleMain = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=49, out_channels=128, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=32, out_channels=2, kernel_size=[ 0, 0, 7, 5, 5, 3, 3 ][intLevel], stride=1, padding=[ 0, 0, 3, 2, 2, 1, 1 ][intLevel])
)
def forward(self, tensorFirst, tensorSecond, tensorFeaturesFirst, tensorFeaturesSecond, tensorFlow):
tensorFeaturesFirst = self.moduleFeat(tensorFeaturesFirst)
tensorFeaturesSecond = self.moduleFeat(tensorFeaturesSecond)
if tensorFlow is not None:
tensorFlow = self.moduleUpflow(tensorFlow)
if tensorFlow is not None:
tensorFeaturesSecond = Backward(tensorInput=tensorFeaturesSecond, tensorFlow=tensorFlow * self.dblBackward)
if self.moduleUpcorr is None:
tensorCorrelation = torch.nn.functional.leaky_relu(input=correlation.FunctionCorrelation(tensorFirst=tensorFeaturesFirst, tensorSecond=tensorFeaturesSecond, intStride=1), negative_slope=0.1, inplace=False)
elif self.moduleUpcorr is not None:
tensorCorrelation = self.moduleUpcorr(torch.nn.functional.leaky_relu(input=correlation.FunctionCorrelation(tensorFirst=tensorFeaturesFirst, tensorSecond=tensorFeaturesSecond, intStride=2), negative_slope=0.1, inplace=False))
return (tensorFlow if tensorFlow is not None else 0.0) + self.moduleMain(tensorCorrelation)
class Subpixel(torch.nn.Module):
def __init__(self, intLevel):
super(Subpixel, self).__init__()
self.dblBackward = [ 0.0, 0.0, 10.0, 5.0, 2.5, 1.25, 0.625 ][intLevel]
if intLevel != 2:
self.moduleFeat = torch.nn.Sequential()
elif intLevel == 2:
self.moduleFeat = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=1, stride=1, padding=0),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.moduleMain = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=[ 0, 0, 130, 130, 194, 258, 386 ][intLevel], out_channels=128, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=32, out_channels=2, kernel_size=[ 0, 0, 7, 5, 5, 3, 3 ][intLevel], stride=1, padding=[ 0, 0, 3, 2, 2, 1, 1 ][intLevel])
)
def forward(self, tensorFirst, tensorSecond, tensorFeaturesFirst, tensorFeaturesSecond, tensorFlow):
tensorFeaturesFirst = self.moduleFeat(tensorFeaturesFirst)
tensorFeaturesSecond = self.moduleFeat(tensorFeaturesSecond)
if tensorFlow is not None:
tensorFeaturesSecond = Backward(tensorInput=tensorFeaturesSecond, tensorFlow=tensorFlow * self.dblBackward)
return (tensorFlow if tensorFlow is not None else 0.0) + self.moduleMain(torch.cat([ tensorFeaturesFirst, tensorFeaturesSecond, tensorFlow ], 1))
class Regularization(torch.nn.Module):
def __init__(self, intLevel):
super(Regularization, self).__init__()
self.dblBackward = [ 0.0, 0.0, 10.0, 5.0, 2.5, 1.25, 0.625 ][intLevel]
self.intUnfold = [ 0, 0, 7, 5, 5, 3, 3 ][intLevel]
if intLevel >= 5:
self.moduleFeat = torch.nn.Sequential()
elif intLevel < 5:
self.moduleFeat = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=[ 0, 0, 32, 64, 96, 128, 192 ][intLevel], out_channels=128, kernel_size=1, stride=1, padding=0),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.moduleMain = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=[ 0, 0, 131, 131, 131, 131, 195 ][intLevel], out_channels=128, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
if intLevel >= 5:
self.moduleDist = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=32, out_channels=[ 0, 0, 49, 25, 25, 9, 9 ][intLevel], kernel_size=[ 0, 0, 7, 5, 5, 3, 3 ][intLevel], stride=1, padding=[ 0, 0, 3, 2, 2, 1, 1 ][intLevel])
)
elif intLevel < 5:
self.moduleDist = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=32, out_channels=[ 0, 0, 49, 25, 25, 9, 9 ][intLevel], kernel_size=([ 0, 0, 7, 5, 5, 3, 3 ][intLevel], 1), stride=1, padding=([ 0, 0, 3, 2, 2, 1, 1 ][intLevel], 0)),
torch.nn.Conv2d(in_channels=[ 0, 0, 49, 25, 25, 9, 9 ][intLevel], out_channels=[ 0, 0, 49, 25, 25, 9, 9 ][intLevel], kernel_size=(1, [ 0, 0, 7, 5, 5, 3, 3 ][intLevel]), stride=1, padding=(0, [ 0, 0, 3, 2, 2, 1, 1 ][intLevel]))
)
self.moduleScaleX = torch.nn.Conv2d(in_channels=[ 0, 0, 49, 25, 25, 9, 9 ][intLevel], out_channels=1, kernel_size=1, stride=1, padding=0)
self.moduleScaleY = torch.nn.Conv2d(in_channels=[ 0, 0, 49, 25, 25, 9, 9 ][intLevel], out_channels=1, kernel_size=1, stride=1, padding=0)
        # end
def forward(self, tensorFirst, tensorSecond, tensorFeaturesFirst, tensorFeaturesSecond, tensorFlow):
tensorDifference = (tensorFirst - Backward(tensorInput=tensorSecond, tensorFlow=tensorFlow * self.dblBackward))
tensorDifference = tensorDifference.pow(2.0).sum(1, True) + 1e-6
# tensorDifference.register_hook(lambda grad: print("1 max: {}".format(grad.max())))
# tensorDifference.register_hook(lambda grad: print("1 min: {}".format(grad.min())))
# tensorDifference = tensorDifference.sum(1, True)
# tensorDifference.register_hook(lambda grad: print("2 max: {}".format(grad.max())))
# tensorDifference.register_hook(lambda grad: print("2 min: {}".format(grad.min())))
tensorDifference = tensorDifference.sqrt()
# tensorDifference.register_hook(lambda grad: print("3 max: {}".format(grad.max())))
# tensorDifference.register_hook(lambda grad: print("3 min: {}".format(grad.min())))
tensorDist = self.moduleDist(self.moduleMain(torch.cat([ tensorDifference, tensorFlow - tensorFlow.view(tensorFlow.size(0), 2, -1).mean(2, True).view(tensorFlow.size(0), 2, 1, 1), self.moduleFeat(tensorFeaturesFirst) ], 1)))
tensorDist = tensorDist.pow(2.0).neg()
tensorDist = (tensorDist - tensorDist.max(1, True)[0]).exp()
tensorDivisor = tensorDist.sum(1, True).reciprocal()
tensorScaleX = self.moduleScaleX(tensorDist * torch.nn.functional.unfold(input=tensorFlow[:, 0:1, :, :], kernel_size=self.intUnfold, stride=1, padding=int((self.intUnfold - 1) / 2)).view_as(tensorDist)) * tensorDivisor
tensorScaleY = self.moduleScaleY(tensorDist * torch.nn.functional.unfold(input=tensorFlow[:, 1:2, :, :], kernel_size=self.intUnfold, stride=1, padding=int((self.intUnfold - 1) / 2)).view_as(tensorDist)) * tensorDivisor
return torch.cat([ tensorScaleX, tensorScaleY ], 1)
self.moduleFeatures = Features()
self.moduleMatching = torch.nn.ModuleList([ Matching(intLevel) for intLevel in [ 2, 3, 4, 5, 6 ] ])
self.moduleSubpixel = torch.nn.ModuleList([ Subpixel(intLevel) for intLevel in [ 2, 3, 4, 5, 6 ] ])
self.moduleRegularization = torch.nn.ModuleList([ Regularization(intLevel) for intLevel in [ 2, 3, 4, 5, 6 ] ])
# self.load_state_dict(torch.load('./network-' + arguments_strModel + '.pytorch'))
# Initialization
for m in self.modules():
classname = m.__class__.__name__
if isinstance(m, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
# # m.weight.data.normal_(0, 0.02)
m.weight.data = torch.nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, inputs):
"""Forward pass
Args:
inputs (list): list of two image tensors, each with shape [Nx3xHxW]
Returns:
            a dictionary containing flow predictions at different scales
- **1** (tensor, [Nx2x(H/2)x(W/2)])
- **2** (tensor, [Nx2x(H/4)x(W/4)])
- **3** (tensor, [Nx2x(H/8)x(W/8)])
- **4** (tensor, [Nx2x(H/16)x(W/16)])
- **5** (tensor, [Nx2x(H/32)x(W/32)])
"""
tensorFirst, tensorSecond = inputs
tensorFeaturesFirst = self.moduleFeatures(tensorFirst)
tensorFeaturesSecond = self.moduleFeatures(tensorSecond)
tensorFirst = [ tensorFirst ]
tensorSecond = [ tensorSecond ]
for intLevel in [ 1, 2, 3, 4, 5 ]:
tensorFirst.append(torch.nn.functional.interpolate(input=tensorFirst[-1], size=(tensorFeaturesFirst[intLevel].size(2), tensorFeaturesFirst[intLevel].size(3)), mode='bilinear', align_corners=False))
tensorSecond.append(torch.nn.functional.interpolate(input=tensorSecond[-1], size=(tensorFeaturesSecond[intLevel].size(2), tensorFeaturesSecond[intLevel].size(3)), mode='bilinear', align_corners=False))
tensorFlow = None
flows = {}
for cnt, intLevel in enumerate([ -1, -2, -3, -4, -5 ]):
tensorFlow = self.moduleMatching[intLevel](tensorFirst[intLevel], tensorSecond[intLevel], tensorFeaturesFirst[intLevel], tensorFeaturesSecond[intLevel], tensorFlow)
tensorFlow = self.moduleSubpixel[intLevel](tensorFirst[intLevel], tensorSecond[intLevel], tensorFeaturesFirst[intLevel], tensorFeaturesSecond[intLevel], tensorFlow)
tensorFlow = self.moduleRegularization[intLevel](tensorFirst[intLevel], tensorSecond[intLevel], tensorFeaturesFirst[intLevel], tensorFeaturesSecond[intLevel], tensorFlow)
flows[5-cnt] = tensorFlow
# post-processing flow
for i in flows:
# flows[i] = flows[i] * (20.0 * (0.5 ** (i-1)))
flows[i] = flows[i] * (20.0 * (0.5 ** (i)))
return flows
| [
"[email protected]"
] | |
4b4238839723416be5a682718dbe7694d6fd4b21 | 91a3a7998a370e1280d01ec722625d2d7afc2226 | /autodiff/context.py | ccf8f58da9ded0cb6dcc2902d42216c07de395af | [
"MIT"
] | permissive | sujason/quantitative | f4cb08614ba5a7276b84fa0694f129ed27319ab8 | 42b7fdea05629934f513a7b15e5b03c7697c5c46 | refs/heads/master | 2021-01-01T05:31:56.613945 | 2016-01-26T03:24:44 | 2016-01-26T03:24:44 | 12,944,434 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 45,855 | py | """
Example of how to use byte-code execution technique to trace accesses to numpy
arrays.
This file demonstrates two applications of this technique:
* optimize numpy computations for repeated calling
* provide automatic differentiation of procedural code
"""
import __builtin__
import ctypes
import inspect
import logging
import opcode
#import os
import sys
#import trace
import traceback
import types
import numpy as np
import theano
import autodiff
from autodiff.utils import itercode, orderedcallargs, flat_from_doc
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# from theano.tensor.shared_randomstreams import RandomStreams
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
# XXX FIXME This will not do - seed must be exposed.
global_randomstreams = RandomStreams(seed=123)
# Opcode help: http://docs.python.org/library/dis.html
# -- cellget returns the contents of a cell
cellget = ctypes.pythonapi.PyCell_Get
cellget.restype = ctypes.py_object
cellget.argtypes = (ctypes.py_object,)
# -- cellmake creates a cell pointer
cellmake = ctypes.pythonapi.PyCell_New
cellmake.restype = ctypes.py_object
cellmake.argtypes = (ctypes.py_object,)
def istensor(x):
tensortypes = (theano.tensor.TensorConstant,
theano.tensor.TensorVariable)
return isinstance(x, tensortypes)
class Unassigned(object):
"""Unassigned value"""
class LoadUnassigned(Exception):
"""Access to Unassigned value"""
class FrameVM(object):
"""
A Class for evaluating a code block of CPython bytecode,
and tracking accesses to numpy arrays.
"""
def __init__(self, watcher, func):
logger.debug('FrameVM: {0}'.format(func))
self.watcher = watcher
if isinstance(func, autodiff.symbolic.Function):
func = func.pyfn
self.func = func
self.stack = []
self._locals = None
self._myglobals = None
self.code_iter = None
self.print_ops = False
self.print_stack = False
def push(self, item):
if item is Unassigned:
raise LoadUnassigned()
self.stack.append(item)
def pop(self):
return self.stack.pop(-1)
def pushN(self, items):
for item in items:
if item is Unassigned:
raise LoadUnassigned()
self.stack.extend(items)
def popN(self, N):
rval = self.stack[-N:]
self.stack[-N:] = []
return rval
def add_shadow(self, x):
if id(x) in self.watcher.constants:
return
# -- We cannot safely set up shadow variables that are aliased to
# memory that is visible to the running program, unless that
# program can guarantee that all views of that memory are
# immutable. CPython caches small ints (-5 <= i <= 256), so
# we wrap them in a non-cached _int() instance.
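        #    (For example, the literal 5 is a single interpreter-wide cached
        #    object, whereas np.int_(5) builds a fresh object on every call,
        #    so its id() is a safe key for shadowing.)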
if isinstance(x, int):
if type(x) is int and -5 <= x <= 256:
x = np.int_(x)
s_x = self.watcher.shared(np.asarray(x))
elif isinstance(x, float):
s_x = self.watcher.shared(np.asarray(x))
elif getattr(x, 'dtype', None) == bool:
print >> sys.stderr, ('Warning: Theano has no bool, '
'upgrading to int8')
s_x = self.watcher.shared(x.astype('int8'))
elif isinstance(x, (np.ndarray, np.number)):
s_x = self.watcher.shared(x)
else:
return
self.watcher.shadow(x, s_x)
def ensure_shadow(self, x):
# small ints can not be shadowed due to CPython memory caching, so we
# wrap them in non-cached _ints.
if type(x) is int and -5 <= x <= 256:
x = np.int_(x)
if id(x) not in self.watcher:
self.add_shadow(x)
return self.watcher.getvar(x)
def call(self, args, kwargs):
if not isinstance(args, tuple):
raise TypeError('vm.call: args must be tuple', args)
if not isinstance(kwargs, dict):
raise TypeError('vm.call: kwargs must be dict', kwargs)
func = self.func
if isinstance(func, type) and issubclass(func, BaseException):
# XXX not shadowing exception creation, because exceptions
# do not have func_code. Is this OK? can we do better?
return func(*args, **kwargs)
func_code = self.func.func_code
self._myglobals = {}
self._locals = []
for name in func_code.co_names:
#print 'name', name
try:
self._myglobals[name] = func.func_globals[name]
except KeyError:
try:
self._myglobals[name] = __builtin__.__getattribute__(name)
except AttributeError:
#print 'WARNING: name lookup failed', name
pass
# get function arguments
argspec = inspect.getargspec(func)
# match function arguments to passed parameters
callargs = orderedcallargs(func, *args, **kwargs)
# named args => locals
self._locals.extend(callargs[arg] for arg in argspec.args)
# *args => locals
if argspec.varargs:
self._locals.append(callargs[argspec.varargs])
# **kwargs => locals
if argspec.keywords:
self._locals.append(callargs[argspec.keywords])
# other vars => locals
no_unbound_args = len(func_code.co_varnames) - len(self._locals)
self._locals.extend([Unassigned] * no_unbound_args)
# shadow arguments
for val in flat_from_doc(callargs):
if id(val) not in self.watcher:
self.add_shadow(val)
self.code_iter = itercode(func_code.co_code)
jmp = None
while not hasattr(self, 'rval'):
try:
i, op, arg = self.code_iter.send(jmp)
except StopIteration:
break
name = opcode.opname[op]
# method names can't have '+' in them
name = {'SLICE+0': 'SLICE_PLUS_0',
'SLICE+1': 'SLICE_PLUS_1',
'SLICE+2': 'SLICE_PLUS_2',
'SLICE+3': 'SLICE_PLUS_3',
'STORE_SLICE+0': 'STORE_SLICE_PLUS_0',
'STORE_SLICE+1': 'STORE_SLICE_PLUS_1',
'STORE_SLICE+2': 'STORE_SLICE_PLUS_2',
'STORE_SLICE+3': 'STORE_SLICE_PLUS_3',
}.get(name, name)
if self.print_ops:
print 'OP: ', i, name
if self.print_stack:
print self.stack
try:
op_method = getattr(self, 'op_' + name)
except AttributeError:
raise AttributeError('FrameVM does not have a method defined '
'for \'op_{0}\''.format(name))
except:
raise
jmp = op_method(i, op, arg)
return self.rval
def op_BINARY_ADD(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
# No Theano vars allowed on the stack
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 + arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 + s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 + s2)
#print 'added sym'
def op_BINARY_DIVIDE(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 / arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 / s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 / s2)
def op_BINARY_FLOOR_DIVIDE(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 // arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 // s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 // s2)
def op_BINARY_SUBTRACT(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 - arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 - s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 - s2)
def op_BINARY_MULTIPLY(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
r = arg1 * arg2
self.push(r)
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 * s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 * s2)
#print 'mul sym', id(r)
def op_BINARY_POWER(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
r = arg1 ** arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2).astype(s1.dtype)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 ** s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 ** s2)
#print 'mul sym', id(r)
def op_BINARY_MODULO(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
r = arg1 % arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 % s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 % s2)
def op_BINARY_SUBSCR(self, i, op, arg):
# Implements TOS = TOS1[TOS].
tos1, tos = self.popN(2)
#print 'tos', tos
#print 'tos1', tos1
rval = tos1[tos]
self.push(rval)
if id(tos) in self.watcher:
s_tos = self.ensure_shadow(tos)
else:
s_tos = tos
if id(tos1) in self.watcher:
s_tos1 = self.ensure_shadow(tos1)
else:
s_tos1 = tos1
if isinstance(tos, np.ndarray) and tos.dtype == bool:
s_rval = s_tos1[s_tos.nonzero()]
else:
s_rval = s_tos1[s_tos]
if id(tos) in self.watcher or id(tos1) in self.watcher:
self.watcher.shadow(rval, s_rval)
def op_BUILD_MAP(self, i, op, arg):
self.push({})
def op_BUILD_SLICE(self, i, op, arg):
if arg == 2:
tos1, tos = self.popN(2)
self.push(slice(tos1, tos))
elif arg == 3:
tos2, tos1, tos = self.popN(3)
self.push(slice(tos2, tos1, tos))
else:
raise NotImplementedError()
def op_BUILD_TUPLE(self, i, op, arg):
if arg:
self.push(tuple(self.popN(arg)))
else:
self.push(())
def op_BUILD_LIST(self, i, op, arg):
if arg:
self.push(list(self.popN(arg)))
else:
self.push([])
def op_CALL_FUNCTION(self, i, op, arg, call_vargs=None, call_kwargs=None):
if call_vargs is None:
# -- these are the things passed with *foo syntax
call_vargs = ()
if call_kwargs is None:
# -- these are the things passed with **foo syntax
call_kwargs = {}
n_args = arg & 0xFF
n_kwargs = (arg & 0xFF00) >> 8
#print 'N_ARGS', n_args, n_kwargs, call_vargs
assert not (arg >> 16) # what would this stuff up here mean?
kwargs = dict([(self.stack[-2 * ii], self.stack[-2 * ii + 1])
for ii in range(n_kwargs, 0, -1)])
args = [self.stack[-ii - 2 * n_kwargs] for ii in range(n_args, 0, -1)]
assert all(Unassigned is not ai for ai in args)
# -- pop all args off the stack
if arg:
self.stack = self.stack[:- n_args - 2 * n_kwargs]
# -- pop the function itself off the stack
func = self.pop()
args = args + list(call_vargs)
orig_kwargs_size = len(kwargs)
kwargs.update(call_kwargs)
assert len(kwargs) == orig_kwargs_size + len(call_kwargs)
#print dir(func)
#print func.__self__
all_args = args + kwargs.values()
# -- get symbolic args
if len(call_vargs) > 0:
s_args = [self.watcher.getvar(a) for a in args[:-len(call_vargs)]]
s_args.extend(self.watcher.getvar(a) for a in call_vargs)
s_args = tuple(s_args)
else:
s_args = tuple(self.watcher.getvar(a) for a in args)
s_kwargs = dict([(kw, self.watcher.getvar(val))
for kw, val in kwargs.items()])
if hasattr(func, '__theano_op__'):
# XXX: document that we are assuming func is pure -
# if rval depends on globals or closure this Context is not
# going to know that.
# -- hand control back to Python for duration of func
rval = func(*args, **kwargs)
if any(id(a) in self.watcher for a in all_args):
s_rval = func.__theano_op__(*s_args, **s_kwargs)
self.watcher.shadow(rval, s_rval)
# ================ NumPy and builtin functions
elif ((getattr(func, '__module__', None)
and func.__module__.startswith('numpy'))
or isinstance(func, np.ufunc)
or str(func) == '<built-in function abs>'
or str(func) == '<built-in function max>'
or str(func) == '<built-in function min>'
or str(func) == '<built-in function sum>'):
rval = func(*args, **kwargs)
if any(id(a) in self.watcher for a in all_args):
if func.__name__ == 'sum':
if type(rval) == int:
rval = np.int_(rval)
s_rval = theano.tensor.sum(*s_args, **s_kwargs)
self.watcher.shadow(rval, s_rval)
elif func.__name__ in ('abs', 'absolute'):
self.watcher.shadow(rval, abs(*s_args))
elif func.__name__ == 'max':
assert str(func) == '<built-in function max>'
s_rval = theano.tensor.maximum(*s_args, **s_kwargs)
assert s_rval.ndim == 0 # builtin max can't make vector
self.watcher.shadow(rval, s_rval)
elif func.__name__ == 'min':
assert str(func) == '<built-in function min>'
s_rval = theano.tensor.minimum(*s_args, **s_kwargs)
assert s_rval.ndim == 0 # builtin min can't make vector
self.watcher.shadow(rval, s_rval)
elif func.__name__ == 'reshape':
self.watcher.shadow(
rval, theano.tensor.reshape(*s_args, **s_kwargs))
elif func.__name__ == 'arange':
# tensor.arange takes the dtype of its input but
# numpy.arange does not. Since we are compiling the Theano
# graph, recast the numpy value to match the symbolic dtype
sval = theano.tensor.arange(*s_args, **s_kwargs)
rval = rval.astype(sval.dtype)
elif func.__name__ in theano.tensor.basic._cast_mapping.keys():
# handle cast functions
rval = func(*args, **kwargs)
sval = theano.tensor.cast(*s_args, dtype=func.__name__)
self.watcher.shadow(rval, sval)
elif func.__name__ in ['bool', 'bool_', 'bool8']:
# Theano has no bool type, cast to int8 instead
                    sval = theano.tensor.cast(*s_args, dtype='int8')
                    self.watcher.shadow(rval, sval)
elif func.__name__ in ['ones', 'zeros']:
s_fn = getattr(theano.tensor, func.__name__)
sval = s_fn(*s_args, **s_kwargs).astype(str(rval.dtype))
self.watcher.shadow(rval, sval)
elif func.__name__ == 'identity':
# theano has no identity function, only 'eye'
dtype = s_kwargs.get('dtype', None)
if not dtype and len(s_args) > 1:
dtype = s_args[1]
sval = theano.tensor.eye(s_args[0], dtype=dtype)
self.watcher.shadow(rval, sval)
else:
try:
theano_fn = getattr(theano.tensor, func.__name__)
except:
raise NotImplementedError(func)
# XXX should we do this? since it is not obvious that
# reductions don't take symbolic args, this could lead to
# users compiling functions that are supposed to have axis
# arguments but silently ignore them. Leaving this
# functionality out for now -- Users must call Constant()
# explicitly.
# many Theano reductions do not support symbolic axes
# by checking for it here we don't have to wrap the
# argument in a Constant()
# argspec = orderedargspec(theano_fn, *s_args, **s_kwargs)
# if (istensor(argspec.get('axis', None)) and
# func.__name__ not in ['concatenate']):
# if 'axis' in s_kwargs:
# s_kwargs['axis'] = kwargs['axis']
# else:
# r_axis = args[argspec.args.index('axis')]
# s_args[argspec.args.index('axis')] = r_axis
self.watcher.shadow(rval, theano_fn(*s_args, **s_kwargs))
else:
# no argument was shadowed (e.g. zeros())
self.add_shadow(rval)
# ================ Array methods
elif isinstance(getattr(func, '__self__', None),
(np.ndarray, np.number)):
assert id(func.__self__) in self.watcher
s_self = self.watcher.svars[id(func.__self__)]
if 0:
pass
elif func.__name__ == 'copy':
assert not args
assert not kwargs
rval = func()
self.watcher.shadow(rval, s_self.copy())
elif func.__name__ == 'reshape':
rval = func(*args, **kwargs)
# Theano requires shape to be a tuple
if not isinstance(s_args[0], (list, tuple)):
s_args = (s_args,)
self.watcher.shadow(rval, s_self.reshape(*s_args, **s_kwargs))
elif func.__name__ == 'swapaxes':
rval = func(*args, **kwargs)
axis1, axis2 = args
s_dims = range(s_self.ndim)
s_dims[axis1], s_dims[axis2] = s_dims[axis2], s_dims[axis1]
self.watcher.shadow(rval, s_self.dimshuffle(*s_dims))
elif func.__name__ == 'astype':
rval = func(*args, **kwargs)
if 'dtype' in kwargs:
dtype = kwargs['dtype']
else:
dtype = args[0]
if not isinstance(dtype, str):
# catch numpy dtype objects like np.float32
try:
dtype = dtype.__name__
except:
raise NotImplementedError
if dtype == 'bool':
                        dtype = 'int8'
self.watcher.shadow(rval, s_self.astype(dtype))
elif func.__name__ == 'sort':
# sort is an inplace method
rval = func() # returns None
# shadow the original array; it has been updated inplace
self.watcher.shadow(func.__self__, s_self.sort())
else:
try:
theano_fn = getattr(s_self, func.__name__)
except:
raise NotImplementedError(func)
rval = func(*args, **kwargs)
self.watcher.shadow(rval, theano_fn(*s_args, **s_kwargs))
# ================ built-ins
elif 'built-in' in str(func):
if len(args) == len(kwargs) == 0:
rval = func()
# -- built-in ndarray methods should be caught above, not here.
elif func.__name__ in ('setdefault',):
rval = func(*args, **kwargs)
elif func.__name__ in ('enumerate', 'range', 'xrange', 'zip'):
rval = func(*args, **kwargs)
elif 'method rand of mtrand.RandomState' in str(func):
# build Theano random uniform numbers
rval = func(*args, **kwargs)
self.watcher.shadow(
rval,
global_randomstreams.uniform(
low=0,
high=1,
size=tuple(args),
dtype=str(np.asarray(rval).dtype)))
elif ('method random of mtrand.RandomState' in str(func)
or 'method random_sample of mtrand.RandomState'
in str(func)):
# build Theano random uniform numbers
rval = func(*args, **kwargs)
self.watcher.shadow(
rval,
global_randomstreams.uniform(
low=0,
high=1,
size=autodiff.utils.as_seq(args[0], tuple),
dtype=str(np.asarray(rval).dtype)))
elif 'method uniform of mtrand.RandomState' in str(func):
# build Theano random normal numbers
rval = func(*args, **kwargs)
self.watcher.shadow(
rval,
global_randomstreams.uniform(
*args,
dtype=str(np.asarray(rval).dtype),
**kwargs))
else:
raise NotImplementedError(func)
# ================ Types
elif type(func) == type:
rval = func(*args, **kwargs)
# ================ AutoDiff Functions
elif func is autodiff.functions.constant:
            # make sure the rval will have a valid id, then add it to the
            # Context's constants set (so it can be ignored)
            rval = func(*args, **kwargs)
            # check bool before int: bool is a subclass of int in Python
            if isinstance(rval, bool):
                rval = np.bool_(rval)
            elif isinstance(rval, int):
                rval = np.int_(rval)
            elif isinstance(rval, float):
                rval = np.float_(rval)
            else:
                rval = np.asarray(rval)
self.watcher.constants.add(id(rval))
elif func is autodiff.functions.tag:
# make sure the rval is shadowed, then add a new svar with the
# appropriate tag
rval = func(*args, **kwargs)
            tag = kwargs.pop('tag') if 'tag' in kwargs else args[1]
sval = self.ensure_shadow(rval)
self.watcher.svars[tag] = sval
# ================ Everything Else
else:
logger.debug('stepping into %s' % str(func))
vm = FrameVM(self.watcher, func)
rval = vm.call(tuple(args), kwargs)
self.push(rval)
def op_CALL_FUNCTION_VAR(self, i, op, arg):
call_vargs = self.pop()
return self.op_CALL_FUNCTION(i, op, arg, call_vargs=call_vargs)
def op_CALL_FUNCTION_VAR_KW(self, i, op, arg):
call_vargs, call_kwargs = self.popN(2)
rval = self.op_CALL_FUNCTION(i,
op,
arg,
call_vargs=call_vargs,
call_kwargs=call_kwargs)
return rval
def op_COMPARE_OP(self, i, op, arg):
opname = opcode.cmp_op[arg]
right = self.pop()
left = self.pop()
if 0:
pass
elif opname == '==':
self.push(left == right)
elif opname == '!=':
self.push(left != right)
elif opname == '>':
self.push(left > right)
elif opname == '<':
self.push(left < right)
elif opname == '>=':
self.push(left >= right)
elif opname == '<=':
self.push(left <= right)
elif opname == 'is':
self.push(left is right)
elif opname == 'in':
self.push(left in right)
else:
raise NotImplementedError('comparison: %s' % opname)
if any(id(a) in self.watcher for a in [left, right]):
sargs = [self.watcher.getvar(ai) for ai in [left, right]]
tos = self.stack[-1]
if 0:
pass
elif opname == '==':
self.watcher.shadow(tos, theano.tensor.eq(*sargs))
elif opname == '!=':
self.watcher.shadow(tos, theano.tensor.neq(*sargs))
elif opname == '<':
self.watcher.shadow(tos, theano.tensor.lt(*sargs))
elif opname == '>':
self.watcher.shadow(tos, theano.tensor.gt(*sargs))
elif opname == '<=':
self.watcher.shadow(tos, theano.tensor.le(*sargs))
elif opname == '>=':
self.watcher.shadow(tos, theano.tensor.ge(*sargs))
elif opname == 'is':
pass
else:
raise NotImplementedError('Comparison on watched args',
opname)
def op_DUP_TOP(self, i, op, arg):
self.stack.append(self.stack[-1])
def op_DUP_TOPX(self, i, op, arg):
assert arg > 0
self.stack.extend(self.stack[-arg:])
def op_FOR_ITER(self, i, op, arg):
# either push tos.next()
# or pop tos and send (arg)
tos = self.stack[-1]
try:
next = tos.next()
# print 'next', next
self.push(next)
except StopIteration:
self.pop()
return ('rel', arg)
def op_INPLACE_ADD(self, i, op, arg):
tos = self.pop()
tos1 = self.pop()
r = tos1
r += tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s_tos + s_tos1).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s_tos + s_tos1)
def op_INPLACE_DIVIDE(self, i, op, arg):
tos = self.pop()
tos1 = self.pop()
r = tos1
r /= tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
            # r was computed above as tos1 / tos, so shadow in the same order
            if isinstance(r, np.ndarray):
                self.watcher.shadow(r, (s_tos1 / s_tos).astype(str(r.dtype)))
            else:
                self.watcher.shadow(r, s_tos1 / s_tos)
def op_INPLACE_MULTIPLY(self, i, op, arg):
tos = self.pop()
tos1 = self.pop()
r = tos1
r *= tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s_tos * s_tos1).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s_tos * s_tos1)
def op_INPLACE_SUBTRACT(self, i, op, arg):
tos1, tos = self.popN(2)
r = tos1
r -= tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
            # r was computed above as tos1 - tos, so shadow in the same order
            if isinstance(r, np.ndarray):
                self.watcher.shadow(r, (s_tos1 - s_tos).astype(str(r.dtype)))
            else:
                self.watcher.shadow(r, s_tos1 - s_tos)
def op_JUMP_ABSOLUTE(self, i, op, arg):
# print 'sending', arg
return ('abs', arg)
def op_JUMP_FORWARD(self, i, op, arg):
return ('rel', arg)
def op_JUMP_IF_TRUE(self, i, op, arg):
tos = self.stack[-1]
if tos:
return ('rel', arg)
def op_GET_ITER(self, i, op, arg):
# replace tos -> iter(tos)
tos = self.stack[-1]
if id(tos) in self.watcher:
raise NotImplementedError('iterator of watched value')
self.stack[-1] = iter(tos)
def op_LOAD_GLOBAL(self, i, op, arg):
# print 'LOAD_GLOBAL', self.names[arg]
tos = self._myglobals[self.func.func_code.co_names[arg]]
if type(tos) is int and -5 <= tos <= 256:
tos = np.int_(tos)
self.push(tos)
if id(tos) not in self.watcher:
self.add_shadow(self.stack[-1])
def op_LOAD_ATTR(self, i, op, arg):
# print 'LOAD_ATTR', self.names[arg]
attr = self.func.func_code.co_names[arg]
#
# we would like to do
# self.stack[-1] = getattr(TOS, attr)
#
# *EXCEPT* if attr is a property, then it actually represents a
# function call
tos = self.pop()
if isinstance(tos, np.ndarray):
if id(tos) not in self.watcher:
raise NotImplementedError(
'how did this var get here?', (id(tos), tos))
if id(tos) in self.watcher:
s_tos = self.watcher.svars[id(tos)]
if attr == 'shape':
rval = tos.shape
# note this old comment... what does it mean?
# XXX: NOT TRACKING SHAPE CHANGES BECAUSE
# BAD INTERACTION WITH fbncc.__theano_op__
self.watcher.shadow(rval, s_tos.shape)
elif attr == 'T':
rval = tos.T
self.watcher.shadow(rval, s_tos.T)
elif attr == 'imag':
rval = tos.imag
self.watcher.shadow(rval, s_tos.imag)
else:
try:
rval = getattr(tos, attr)
except:
raise NotImplementedError('ndarray attribute %s' % attr)
self.push(rval)
else:
logger.debug('attribute access %s' % attr)
rval = getattr(tos, attr)
self.push(rval)
# if (isinstance(rval, np.ndarray)
# and id(rval) not in self.watcher):
# self.add_shadow(rval)
if id(rval) not in self.watcher:
self.add_shadow(rval)
def op_LOAD_CONST(self, i, op, arg):
tos = self.func.func_code.co_consts[arg]
if type(tos) is int and -5 <= tos <= 256:
tos = np.int_(tos)
self.push(tos)
# if isinstance(tos, float):
# if id(tos) not in self.watcher:
# var = theano.tensor.as_tensor_variable(tos)
# self.watcher.svars[id(tos)] = var
if (isinstance(tos, np.ndarray) and id(tos) not in self.watcher):
raise NotImplementedError()
def op_LOAD_CLOSURE(self, i, op, arg):
co_cellvars = self.func.func_code.co_cellvars
co_freevars = self.func.func_code.co_freevars
co_varnames = self.func.func_code.co_varnames
if arg < len(co_cellvars):
name = co_cellvars[arg]
else:
name = co_freevars[arg - len(co_cellvars)]
thing = self._locals[co_varnames.index(name)]
cell = cellmake(thing)
self.push(cell)
def op_LOAD_DEREF(self, i, op, arg):
# -- this is called to access a variable that appears in multiple
# scopes.
# -- vars *referenced* by nested scopes
co_cellvars = self.func.func_code.co_cellvars
# -- vars read from enclosing scopes
co_freevars = self.func.func_code.co_freevars
# -- all varnames
co_varnames = self.func.func_code.co_varnames
if arg < len(co_cellvars):
# -- normal case
name = co_cellvars[arg]
# -- XXX: Is this really the right thing to do??
thing = self._locals[co_varnames.index(name)]
else:
name = co_freevars[arg - len(co_cellvars)]
closure = self.func.func_closure
assert len(co_freevars) == len(closure)
# print 'LOAD_DEREF (%s:%s)' % (self.func, name)
cell = closure[arg - len(co_cellvars)]
thing = cellget(cell)
self.push(thing)
# if (isinstance(thing, np.ndarray) and id(thing) not in self.watcher):
# self.add_shadow(thing)
if id(thing) not in self.watcher:
self.add_shadow(thing)
def op_LOAD_FAST(self, i, op, arg):
tos = self._locals[arg]
try:
self.push(tos)
except LoadUnassigned:
raise LoadUnassigned(self.func.func_code.co_varnames[arg])
if not isinstance(tos, (int, float)):
if id(tos) not in self.watcher:
self.add_shadow(tos)
def op_MAKE_CLOSURE(self, i, op, arg):
return self.op_MAKE_FUNCTION(i, op, arg, w_closure=True)
def op_MAKE_FUNCTION(self, i, op, arg, w_closure=False):
func_code = self.pop()
if w_closure:
cells = self.pop()
if arg:
argdefs = tuple(self.stack[-arg:])
self.stack[-arg:] = []
else:
argdefs = ()
if w_closure:
fn = types.FunctionType(func_code,
self.func.func_globals,
argdefs=argdefs,
closure=cells,)
else:
fn = types.FunctionType(func_code,
self.func.func_globals,
argdefs=argdefs)
self.push(fn)
def op_POP_BLOCK(self, i, op, arg):
logger.debug('POP_BLOCK, what to do?')
pass
def op_POP_JUMP_IF_FALSE(self, i, op, arg):
#tos = self.stack[-1]
tos = self.pop()
if not tos:
return ('abs', arg)
def op_POP_JUMP_IF_TRUE(self, i, op, arg):
#tos = self.stack[-1]
tos = self.pop()
if tos:
return ('abs', arg)
def op_POP_TOP(self, i, op, arg):
self.pop()
def op_PRINT_ITEM(self, i, op, arg):
thing = self.pop()
if str(thing) == 'PRINT_OPS:True':
self.print_ops = True
if str(thing) == 'PRINT_STACK:True':
self.print_stack = True
print thing,
def op_PRINT_NEWLINE(self, i, op, arg):
print ''
def op_SETUP_LOOP(self, i, op, arg):
logger.debug('SETUP_LOOP, what to do?')
pass
def op_SLICE_PLUS_0(self, i, op, arg):
#Implements TOS = TOS[:].
TOS = self.pop()
new_tos = TOS[:]
self.push(new_tos)
if id(TOS) in self.watcher:
s = self.watcher.getvar(TOS)
s_rval = s[:]
self.watcher.shadow(new_tos, s_rval)
def op_SLICE_PLUS_1(self, i, op, arg):
# TOS = TOS1[TOS:]
TOS1, TOS = self.popN(2)
new_tos = TOS1[TOS:]
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1]):
s = self.watcher.getvar(TOS)
s1 = self.watcher.getvar(TOS1)
s_rval = s1[s:]
self.watcher.shadow(new_tos, s_rval)
def op_SLICE_PLUS_2(self, i, op, arg):
# TOS = TOS1[:TOS]
TOS1, TOS = self.popN(2)
new_tos = TOS1[:TOS]
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1]):
s = self.watcher.getvar(TOS)
s1 = self.watcher.getvar(TOS1)
s_rval = s1[:s]
self.watcher.shadow(new_tos, s_rval)
def op_SLICE_PLUS_3(self, i, op, arg):
# Implements TOS = TOS2[TOS1:TOS]
TOS2, TOS1, TOS = self.popN(3)
new_tos = TOS2[TOS1:TOS]
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2]):
s = self.watcher.getvar(TOS)
s1 = self.watcher.getvar(TOS1)
s2 = self.watcher.getvar(TOS2)
s_rval = s2[s1:s]
self.watcher.shadow(new_tos, s_rval)
def op_STORE_ATTR(self, i, op, arg):
# implements TOS.name = TOS1
TOS1, TOS = self.popN(2)
        if id(TOS) in self.watcher:
raise NotImplementedError()
name = self.func.func_code.co_names[arg]
setattr(TOS, name, TOS1)
def op_STORE_SLICE_PLUS_0(self, i, op, arg):
#Implements TOS[:] = TOS1
TOS1, TOS = self.popN(2)
new_tos = TOS
new_tos[:] = TOS1
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_rval = theano.tensor.set_subtensor(s_tos[:], s_tos1)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_SLICE_PLUS_1(self, i, op, arg):
TOS2, TOS1, TOS = self.popN(3)
new_tos = TOS1
new_tos[TOS:] = TOS2
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_tos2 = self.watcher.getvar(TOS2)
s_rval = theano.tensor.set_subtensor(s_tos1[s_tos:], s_tos2)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_SLICE_PLUS_2(self, i, op, arg):
# TOS1[:TOS] = TOS2
TOS2, TOS1, TOS = self.popN(3)
new_tos = TOS1
new_tos[:TOS] = TOS2
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_tos2 = self.watcher.getvar(TOS2)
s_rval = theano.tensor.set_subtensor(s_tos1[:s_tos], s_tos2)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_SLICE_PLUS_3(self, i, op, arg):
# Implements TOS2[TOS1:TOS] = TOS3
TOS3, TOS2, TOS1, TOS = self.popN(4)
new_tos = TOS2
new_tos[TOS1:TOS] = TOS3
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2, TOS3]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_tos2 = self.watcher.getvar(TOS2)
s_tos3 = self.watcher.getvar(TOS3)
s_rval = theano.tensor.set_subtensor(s_tos2[s_tos1:s_tos], s_tos3)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_FAST(self, i, op, arg):
self._locals[arg] = self.pop()
def op_STORE_MAP(self, i, op, arg):
key = self.pop()
val = self.pop()
dct = self.stack[-1]
dct[key] = val
def op_STORE_SUBSCR(self, i, op, arg):
# Implements TOS1[TOS] = TOS2.
tos = self.pop()
tos1 = self.pop()
tos2 = self.pop()
tos1[tos] = tos2
# tos can't be real-valued so there's no gradient through it
if id(tos1) in self.watcher or id(tos2) in self.watcher:
s_tos1 = self.ensure_shadow(tos1)
s_tos2 = self.ensure_shadow(tos2)
new_s_tos1 = theano.tensor.set_subtensor(s_tos1[tos], s_tos2)
self.watcher.svars[id(tos1)] = new_s_tos1
def op_RAISE_VARARGS(self, i, op, arg):
print >> sys.stderr, "Exception in autodiff.Context:"
if 1 <= arg:
exc = self.pop()
else:
exc = None
if 2 <= arg:
param = self.pop()
else:
param = None
if 3 <= arg:
tb = self.pop()
traceback.print_tb(tb, file=sys.stderr)
else:
print >> sys.stderr, "No traceback info available"
if param is not None:
raise param
elif exc is not None:
raise exc()
else:
raise Exception('Completely mysterious exception')
def op_RETURN_VALUE(self, i, op, arg):
self.rval = self.pop()
if id(self.rval) not in self.watcher:
self.add_shadow(self.rval)
def op_ROT_TWO(self, i, op, arg):
a = self.stack[-1]
b = self.stack[-2]
self.stack[-1] = b
self.stack[-2] = a
def op_ROT_THREE(self, i, op, arg):
a = self.stack[-1]
b = self.stack[-2]
c = self.stack[-3]
self.stack[-1] = b
self.stack[-2] = c
self.stack[-3] = a
def op_ROT_FOUR(self, i, op, arg):
a = self.stack[-1]
b = self.stack[-2]
c = self.stack[-3]
d = self.stack[-4]
self.stack[-1] = b
self.stack[-2] = c
self.stack[-3] = d
self.stack[-4] = a
def op_UNARY_NEGATIVE(self, i, op, arg):
arg1 = self.pop()
assert not hasattr(arg1, 'type')
r = -arg1
self.push(r)
if id(arg1) in self.watcher:
s1 = self.ensure_shadow(arg1)
self.watcher.shadow(r, -s1)
def op_UNPACK_SEQUENCE(self, i, op, arg):
tos = self.pop()
self.stack.extend(tos[::-1])
class Context(object):
def __init__(self, device=None, borrowable=(), force_floatX=False):
"""
borrowable : tuple of objects
If an object in this tuple is encountered while tracing the
function, then its symbolic representation will alias that object's
memory location. This means that *inplace* operations on the Python
(likely NumPy) object will affect the symbolic function.
force_floatX : bool
If True, floats and float NumPy ndarrays will be cast to the dtype
specified at theano.config.floatX when forming symbolic shared
variables, if they do not have it already. Objects in `borrowable`
are never cast.
"""
self.svars = {}
self.nogc = [] # ids that must not be reused
# XXX: rethink to avoid actually holding on to all these intermediates.
self.device = device
self.borrowable_ids = [id(b) for b in borrowable]
self.force_floatX = force_floatX
self.constants = set()
def __iter__(self):
return self.svars.__iter__()
def shadow(self, rval, sval, force=True):
assert hasattr(sval, 'type') # assert sval is Theano variable
if force:
self.svars[id(rval)] = sval
else:
self.svars.setdefault(id(rval), sval)
# -- shadow vars have to match dtype and ndim
if isinstance(rval, np.ndarray):
if str(rval.dtype) == 'bool':
assert sval.dtype == 'int8', (rval.dtype, sval.dtype)
elif not self.force_floatX:
assert str(rval.dtype) == sval.dtype, (rval, sval)
assert rval.ndim == sval.ndim, (rval, sval)
# -- assert postcondition
assert sval is self.getvar(rval)
self.nogc.append(rval)
def call(self, fn, args=(), kwargs={}):
vm = FrameVM(self, fn)
return vm.call(args, kwargs)
def shared(self, obj, name=None, borrow=None):
if borrow is None:
borrow = (id(obj) in self.borrowable_ids)
if self.force_floatX and not borrow:
if (isinstance(obj, np.ndarray)
and 'float' in str(obj.dtype)
and str(obj.dtype) != theano.config.floatX):
obj = obj.astype(theano.config.floatX)
# not all objects have shared constructors with a borrow keyword
# for example theano.shared(np.float32(1)) works but
# theano.shared(np.float32(1), borrow=[False|True]) fails
if self.device == 'cpu':
try:
return theano.tensor._shared(obj, borrow=borrow)
except:
return theano.tensor._shared(obj)
else:
try:
return theano.shared(obj, borrow=borrow)
except:
return theano.shared(obj)
def getvar(self, var):
return self.svars.get(id(var), var)
def reset(self):
self.constants.clear()
| [
"[email protected]"
] | |
fcd11702e81bde025971e101d2d70f66f5e8cdb3 | 55e81ceffe94ec087ea9c4e6576093b56a86b852 | /src/modules/convolution.py | 0838e88f436ead2fb4833f31b9288f853b90c574 | [] | no_license | feathernox/neuroml_fmri_deepspd | 43941062e244c4ae9084821a7512a287407be0c3 | 119f968458a92d3ea644f55d1d4fab9527b73c16 | refs/heads/main | 2022-12-30T23:48:28.771036 | 2020-10-25T10:13:23 | 2020-10-25T10:13:23 | 306,640,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | import torch
from torch import nn
from torch.nn import init
import math
class SimpleConvolution(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.weight = nn.Parameter(torch.Tensor(in_channels, out_channels))
self.reset_parameters()
def forward(self, x):
assert x.ndim == 4
convolved = (self.weight[None, :, :, None, None] * x[:, :, None, :, :]).sum(dim=1)
return convolved
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
| [
"[email protected]"
] | |
79180c09bcb81b56e6d9d1043b6380e55871d2a0 | c7e765a9bed33d3bfb21774e3995bf4a09e04add | /adminmgr/media/code/A3/task1/BD_135_703_2371_KhgNwL4.py | 39a4a494197832cb4b20798bc47cbace9f61a4d5 | [
"Apache-2.0"
] | permissive | IamMayankThakur/test-bigdata | 13dd2ac7fb76c9baed6c3a0aa943057a22e2d237 | 7f507918c7bec31c92eedcd94491a83486623049 | refs/heads/master | 2022-05-03T00:59:44.127494 | 2022-02-10T19:50:16 | 2022-02-10T19:50:16 | 201,585,028 | 10 | 4 | Apache-2.0 | 2022-04-22T23:39:45 | 2019-08-10T05:34:09 | Python | UTF-8 | Python | false | false | 1,134 | py | import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
import sys
import requests
def func(rdd):
sorted_rdd1 = rdd.sortBy(lambda x: (-x[1],x[0]))
sorted_rdd=sorted_rdd1.filter(lambda y: y[0] !='')
s_list=sorted_rdd.collect()
if(s_list!=[]):
print(s_list[0][0],s_list[1][0],s_list[2][0],s_list[3][0],s_list[4][0],sep=",")
conf=SparkConf()
conf.setAppName("BigData")
sc=SparkContext(conf=conf)
ssc=StreamingContext(sc,int(sys.argv[2]))
ssc.checkpoint("/checkpoint_BIGDATA")
dataStream=ssc.socketTextStream("localhost",9009)
hashtag1=dataStream.window(int(sys.argv[1]),1)
# DStreams have no select(); extract field 7 (the hashtag field) and split any
# comma-separated hashtags into individual elements in a single flatMap.
hashtag3 = hashtag1.flatMap(lambda w: w.split(";")[7].split(","))
hashtag4 = hashtag3.map(lambda x: (x,1))
#hashtags=hashtag4.reduceByKey(add)
hashtags=hashtag4.updateStateByKey(lambda x,y:int(x)+int(y))
hashtags.foreachRDD(func)
ssc.start()
ssc.awaitTermination(25)
ssc.stop()
| [
"[email protected]"
] | |
37cd36176891ea926eef36e5b677f6b4352ae940 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-as/huaweicloudsdkas/v1/model/pause_scaling_group_request.py | c88cd3d690fe6f2fc4753b1834d1bd3b10a9f9f9 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,079 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PauseScalingGroupRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'scaling_group_id': 'str',
'body': 'PauseScalingGroupOption'
}
attribute_map = {
'scaling_group_id': 'scaling_group_id',
'body': 'body'
}
def __init__(self, scaling_group_id=None, body=None):
"""PauseScalingGroupRequest
The model defined in huaweicloud sdk
        :param scaling_group_id: Scaling group ID
:type scaling_group_id: str
:param body: Body of the PauseScalingGroupRequest
:type body: :class:`huaweicloudsdkas.v1.PauseScalingGroupOption`
"""
self._scaling_group_id = None
self._body = None
self.discriminator = None
self.scaling_group_id = scaling_group_id
if body is not None:
self.body = body
@property
def scaling_group_id(self):
"""Gets the scaling_group_id of this PauseScalingGroupRequest.
        Scaling group ID
:return: The scaling_group_id of this PauseScalingGroupRequest.
:rtype: str
"""
return self._scaling_group_id
@scaling_group_id.setter
def scaling_group_id(self, scaling_group_id):
"""Sets the scaling_group_id of this PauseScalingGroupRequest.
        Scaling group ID
:param scaling_group_id: The scaling_group_id of this PauseScalingGroupRequest.
:type scaling_group_id: str
"""
self._scaling_group_id = scaling_group_id
@property
def body(self):
"""Gets the body of this PauseScalingGroupRequest.
:return: The body of this PauseScalingGroupRequest.
:rtype: :class:`huaweicloudsdkas.v1.PauseScalingGroupOption`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this PauseScalingGroupRequest.
:param body: The body of this PauseScalingGroupRequest.
:type body: :class:`huaweicloudsdkas.v1.PauseScalingGroupOption`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PauseScalingGroupRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
76bbea84228c618e352dc125b30c93b04c64e117 | 6a420c47a9b8940ed2f437c0a49c7ccccf7473bd | /apps/blog/models/entry.py | 421764412b477c2fd97703ab561da5706ad45e74 | [] | no_license | deadendif/blog | f0198287e07dc95c5a12b44cad5dc58b86c2294e | 2eb48a7f66ae93a8bafcba824461e4c1a430e10a | refs/heads/master | 2021-06-20T23:47:14.189596 | 2017-08-12T06:25:25 | 2017-08-12T06:25:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,380 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from django.utils import timezone
from django.utils.html import strip_tags
from django.utils.text import Truncator
from tagging.fields import TagField
from tagging.utils import parse_tag_input
from author import Author
from category import Category
from blog.settings import DRAFT, HIDDEN, PUBLISHED
from blog.settings import ENTRY_DETAIL_TEMPLATES, PREVIEW_SPLITTERS, PREVIEW_MORE_STRING
from blog.utils import entries_published
from blog.utils import markdown
from blog.managers import EntryPublishedManager
from blog.preview import HTMLPreview
class Entry(models.Model):
"""
Entry model class
"""
STATUS = (
(DRAFT, 'draft'),
(HIDDEN, 'hidden'),
(PUBLISHED, 'published')
)
title = models.CharField('title',
max_length=255,
help_text='Title of the entry.')
slug = models.SlugField('slug',
max_length=255,
                            help_text='Used to build the entry\'s URL.')
status = models.IntegerField('status',
db_index=True, choices=STATUS, default=DRAFT,
help_text='Status of the entry.')
create_time = models.DateTimeField('Create time',
db_index=True, default=timezone.now,
help_text='Datetime when creating the entry.')
start_publish = models.DateTimeField('start publish',
db_index=True, blank=True, null=True,
help_text='Datetime when starting publication')
end_publish = models.DateTimeField('end publish',
db_index=True, blank=True, null=True,
help_text='Datetime when stopping publication')
last_update = models.DateTimeField('last update time',
default=timezone.now,
                                   help_text='Datetime when the entry was last updated.')
excerpt = models.TextField('excerpt',
blank=True,
                               help_text='Used for SEO purposes.')
content = models.TextField('content',
blank=True,
help_text='Content of the entry.')
    featured = models.BooleanField('feature',
default=False,
help_text='Telling if the entry is featured')
author = models.ForeignKey(
Author, related_name='entries', null=True, on_delete=models.SET_NULL, # ???
help_text='Author of the entry.')
category = models.ForeignKey(
Category, related_name='entries', blank=False, null=True,
help_text='Categories that contain this entry.')
tags = TagField('tags')
login_required = models.BooleanField('login required',
default=False,
                                         help_text='Telling if the user needs to be authenticated.')
password = models.CharField('password',
max_length=64, blank=True,
help_text='Protects the entry with a password.')
detail_template = models.CharField('detail template',
max_length=255, choices=ENTRY_DETAIL_TEMPLATES,
default=ENTRY_DETAIL_TEMPLATES[0],
help_text='The detail tempate of the entry.')
# Set managers
objects = models.Manager()
published = EntryPublishedManager()
@property
def publish_date(self):
"""
Return publish date or create data.
"""
return self.start_publish or self.create_time
@property
def is_visible(self):
"""
Check if the entry is visible.
"""
now = timezone.now()
if (self.start_publish and now < self.start_publish) or \
(self.end_publish and now >= self.end_publish):
return False
return self.status == PUBLISHED
@property
def previous_next_entry(self):
"""
Return and cache previous and next published entry.
"""
attr_name = 'previous_next'
previous_next = getattr(self, attr_name, None)
if previous_next is None:
if not self.is_visible:
previous_next = (None, None)
setattr(self, 'previous_next', previous_next)
return previous_next
entries = list(self.__class__.published.all())
index = entries.index(self)
previous_entry = entries[index + 1] if index + 1 < len(entries) else None
next_entry = entries[index - 1] if index > 0 else None
previous_next = (previous_entry, next_entry)
setattr(self, attr_name, previous_next)
return previous_next
@property
def previous_entry(self):
"""
Return the previous published entry if exists.
"""
return self.previous_next_entry[0]
@property
def next_entry(self):
"""
Return the next published entry if exist.
"""
return self.previous_next_entry[1]
@property
def tags_list(self):
"""
Return iterable list of tags.
"""
return parse_tag_input(self.tags)
@models.permalink
def get_absolute_url(self):
"""
Return the entry's URL
"""
ctime = self.create_time
return ('blog:entries:detail', (), {
'year': self.create_time.strftime('%Y'),
'month': self.create_time.strftime('%m'),
'day': self.create_time.strftime('%d'),
'slug': self.slug
})
@property
def html_content(self):
"""
Return html content
"""
return markdown(self.content)
@property
def html_preview(self):
"""
Return html preview object
"""
return HTMLPreview(self.html_content)
# return HTMLPreview(self.html_content, self.excerpt)
@property
def word_count(self):
"""
Return the number of words
"""
return len(strip_tags(self.html_content).split())
def save(self, *args, **kwargs):
"""
[Override] update fields: last_update, excerpt
"""
self.last_update = timezone.now()
# if not self.excerpt and self.status == PUBLISHED:
# self.excerpt = HTMLPreview(self.html_content)
super(self.__class__, self).save(*args, **kwargs)
def __str__(self):
return '%s: %s' % (self.title, self.STATUS[self.status - 1][1])
class Meta:
ordering = ['-create_time']
        get_latest_by = 'create_time'
| [
"[email protected]"
] | |
37434a2d02bf51c411162c56fe9eda123ad980d9 | bede13ba6e7f8c2750815df29bb2217228e91ca5 | /advance_cash_flow_statements/wizard/account_account.py | 8ab4d6059149ffc32c123a592816f6a73772185a | [] | no_license | CybroOdoo/CybroAddons | f44c1c43df1aad348409924603e538aa3abc7319 | 4b1bcb8f17aad44fe9c80a8180eb0128e6bb2c14 | refs/heads/16.0 | 2023-09-01T17:52:04.418982 | 2023-09-01T11:43:47 | 2023-09-01T11:43:47 | 47,947,919 | 209 | 561 | null | 2023-09-14T01:47:59 | 2015-12-14T02:38:57 | HTML | UTF-8 | Python | false | false | 3,562 | py | # -*- coding: utf-8 -*-
#############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
#
# Copyright (C) 2019-TODAY Cybrosys Technologies(<https://www.cybrosys.com>)
# Author: Cybrosys Techno Solutions(<https://www.cybrosys.com>)
#
# You can modify it under the terms of the GNU LESSER
# GENERAL PUBLIC LICENSE (LGPL v3), Version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE (LGPL v3) for more details.
#
# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE
# (LGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
from odoo import api, models, fields
from odoo.tools.misc import get_lang
class AccountCommonReport(models.Model):
_inherit = "account.report"
_description = "Account Common Report"
company_id = fields.Many2one('res.company', string='Company', required=True,
readonly=True,
default=lambda self: self.env.company)
journal_ids = fields.Many2many(
comodel_name='account.journal',
string='Journals',
required=True,
default=lambda self: self.env['account.journal'].search(
[('company_id', '=', self.company_id.id)]),
domain="[('company_id', '=', company_id)]",
)
date_from = fields.Date(string='Start Date')
date_to = fields.Date(string='End Date')
target_move = fields.Selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], string='Target Moves', required=True,
default='posted')
@api.onchange('company_id')
def _onchange_company_id(self):
if self.company_id:
self.journal_ids = self.env['account.journal'].search(
[('company_id', '=', self.company_id.id)])
else:
self.journal_ids = self.env['account.journal'].search([])
def _build_contexts(self, data):
result = {}
result['journal_ids'] = 'journal_ids' in data['form'] and data['form'][
'journal_ids'] or False
result['state'] = 'target_move' in data['form'] and data['form'][
'target_move'] or ''
result['date_from'] = data['form']['date_from'] or False
result['date_to'] = data['form']['date_to'] or False
result['strict_range'] = True if result['date_from'] else False
result['company_id'] = data['form']['company_id'][0] or False
return result
def _print_report(self, data):
raise NotImplementedError()
def check_report(self):
print('Account.report')
self.ensure_one()
data = {'ids': self.env.context.get('active_ids', []),
'model': self.env.context.get('active_model', 'ir.ui.menu'),
'form': self.read(
['date_from', 'date_to', 'journal_ids', 'target_move',
'company_id'])[0]}
used_context = self._build_contexts(data)
data['form']['used_context'] = dict(used_context,
lang=get_lang(self.env).code)
return self.with_context(discard_logo_check=True)._print_report(data)
| [
"[email protected]"
] | |
c5383493a1f9677eb1111b85946e9ad9e14fe2b8 | fab14fae2b494068aa793901d76464afb965df7e | /benchmarks/f3_wrong_hints/scaling_nonlinear_software/10-19_35.py | 6ae8d393dc099f7aac05202aa5a431adcbaf9881 | [
"MIT"
] | permissive | teodorov/F3 | 673f6f9ccc25acdfdecbfc180f439253474ba250 | c863215c318d7d5f258eb9be38c6962cf6863b52 | refs/heads/master | 2023-08-04T17:37:38.771863 | 2021-09-16T07:38:28 | 2021-09-16T07:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,238 | py | from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
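"""Return the frozenset of Hint objects over pc, x, y and z (locations
with region and progress conditions) supplied for this benchmark.
(Descriptive docstring added.)"""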
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc = Location(env, mgr.LE(z, i_0))
loc.set_progress(0, mgr.Equals(x_z, z))
h_z = Hint("h_z0", env, frozenset([z]), symbs)
h_z.set_locs([loc])
res.append(h_z)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc2 = Location(env, mgr.GE(y, i_2))
loc2.set_progress(0, mgr.Equals(x_y, y))
h_y = Hint("h_y1", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)),
stutterT=stutter)
loc.set_progress(0, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_2)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(z, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, z)))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.GE(x_z, mgr.Plus(z, y)))
loc1 = Location(env, mgr.GE(z, i_3), mgr.GE(x, i_0))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, i_0)))
h_z = Hint("h_z2", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_pc, i_3))
loc1 = Location(env, mgr.Equals(pc, i_3))
loc1.set_progress(0, mgr.Equals(x_pc, i_1))
h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(x, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, x)))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3))
loc1.set_progress(2, mgr.Equals(x_y, y))
loc2 = Location(env, mgr.GE(y, i_3))
loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
loc2 = Location(env, mgr.GT(x, i_3))
loc2.set_progress(2, mgr.Equals(x_x, x))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(2, mgr.GE(x_pc, i_3))
loc2 = Location(env, mgr.GE(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc4", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
return frozenset(res)
| [
"[email protected]"
] | |
839fceccecbbd4b46b6c93de655ca6dc94e9bf89 | 3935927379f7f06a3aaeffef3e32c6261e93a4f4 | /CNN_variant2/retrain.py | 40b1a989ed855979d1f07a37d15baf02102621d4 | [] | no_license | Tanmoy-Bipro/BSL-alphabet-recognition | 7be07a45149bb07d0666c7d847720cb50d6cbc33 | 7c7717e1a8f7f274d0db0fead6ef269f8cc017dc | refs/heads/master | 2020-09-27T07:48:37.705809 | 2019-12-27T15:14:20 | 2019-12-27T15:14:20 | 226,467,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,860 | py |
# pylint: disable=line-too-long
r"""Simple transfer learning with image modules
This example shows how to train an image classifier based on any
TensorFlow Hub module that computes image feature vectors. By default,
it uses the feature vectors computed by Inception V3 trained on ImageNet.
See https://github.com/tensorflow/hub/blob/master/docs/modules/image.md
for more options.
The top layer receives as input a 2048-dimensional vector (assuming
Inception V3) for each image. We train a softmax layer on top of this
representation. If the softmax layer contains N labels, this corresponds
to learning N + 2048*N model parameters for the biases and weights.
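For instance, with 5 classes this formula gives 5 + 2048*5 = 10,245 trainable
parameters.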
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. (For a working example,
download http://download.tensorflow.org/example_images/flower_photos.tgz
and run tar xzf flower_photos.tgz to unpack it.)
Once your images are prepared, and you have pip-installed tensorflow-hub and
a sufficiently recent version of tensorflow, you can run the training with a
command like this:
```bash
python retrain.py --image_dir ~/flower_photos
```
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the tensorflow/examples/label_image sample code.
By default this script will use the highly accurate, but comparatively large and
slow Inception V3 model architecture. It's recommended that you start with this
to validate that you have gathered good training data, but if you want to deploy
on resource-limited platforms, you can try the `--tfhub_module` flag with a
Mobilenet model. For more information on Mobilenet, see
https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
For example:
Run floating-point version of Mobilenet:
```bash
python retrain.py --image_dir ~/flower_photos \
--tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/1
```
Run Mobilenet, instrumented for quantization:
```bash
python retrain.py --image_dir ~/flower_photos/ \
--tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/quantops/feature_vector/1
```
These instrumented models can be converted to fully quantized mobile models via
TensorFlow Lite.
There are different Mobilenet models to choose from, with a variety of file
size and latency options.
- The first number can be '100', '075', '050', or '025' to control the number
of neurons (activations of hidden layers); the number of weights (and hence
to some extent the file size and speed) shrinks with the square of that
fraction.
- The second number is the input image size. You can choose '224', '192',
'160', or '128', with smaller sizes giving faster speeds.
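For example, to try a smaller and faster variant (the module path below is
assumed to follow the same naming pattern as the examples above):
```bash
python retrain.py --image_dir ~/flower_photos \
--tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_050_160/feature_vector/1
```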
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
To use with Tensorflow Serving, run this tool with --saved_model_dir set
to some increasingly numbered export location under the model base path, e.g.:
```bash
python retrain.py (... other args as before ...) \
--saved_model_dir=/tmp/saved_models/$(date +%s)/
tensorflow_model_server --port=9000 --model_name=my_image_classifier \
--model_base_path=/tmp/saved_models/
```
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
from datetime import datetime
import hashlib
import os.path
import random
import re
import sys
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
FLAGS = None
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
# The location where variable checkpoints will be stored.
CHECKPOINT_NAME = '/tmp/_retrain_checkpoint'
# A module is understood as instrumented for quantization with TF-Lite
# if it contains any of these ops.
FAKE_QUANT_OPS = ('FakeQuantWithMinMaxVars',
'FakeQuantWithMinMaxVarsPerChannel')
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the subfolders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
An OrderedDict containing an entry for each label subfolder, with images
split into training, testing, and validation sets within each label.
The order of items defines the class indices.
"""
if not tf.gfile.Exists(image_dir):
tf.logging.error("Image directory '" + image_dir + "' not found.")
return None
result = collections.OrderedDict()
sub_dirs = sorted(x[0] for x in tf.gfile.Walk(image_dir))
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = sorted(set(os.path.normcase(ext) # Smash case on Windows.
for ext in ['JPEG', 'JPG', 'jpeg', 'jpg']))
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
tf.logging.info("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(tf.gfile.Glob(file_glob))
if not file_list:
tf.logging.warning('No files found')
continue
if len(file_list) < 20:
tf.logging.warning(
'WARNING: Folder has less than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
tf.logging.warning(
'WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put an image in, so that the data set creator has
# a way of grouping photos that are close variations of each other. For
# example, this is used in the plant disease data set to group multiple
# pictures of the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
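# For example (illustrative numbers only): with validation_percentage=10 and
# testing_percentage=10, a file whose name hashes to percentage_hash 7.3 goes
# to validation, one hashing to 12.8 goes to testing (10 <= 12.8 < 20), and
# anything hashing to 20 or above goes to training.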
hash_name_hashed = hashlib.sha1(tf.compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
def get_image_path(image_lists, label_name, index, image_dir, category):
"""Returns a path to an image for a label at the given index.
Args:
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category, module_name):
"""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
module_name: The name of the image module being used.
Returns:
File system path string to an image that meets the requested parameters.
"""
module_name = (module_name.replace('://', '~') # URL scheme.
.replace('/', '~') # URL and Unix paths.
.replace(':', '~').replace('\\', '~')) # Windows paths.
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '_' + module_name + '.txt'
def create_module_graph(module_spec):
"""Creates a graph and loads Hub Module into it.
Args:
module_spec: the hub.ModuleSpec for the image module being used.
Returns:
graph: the tf.Graph that was created.
bottleneck_tensor: the bottleneck values output by the module.
resized_input_tensor: the input images, resized as expected by the module.
wants_quantization: a boolean, whether the module has been instrumented
with fake quantization ops.
"""
height, width = hub.get_expected_image_size(module_spec)
with tf.Graph().as_default() as graph:
resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])
m = hub.Module(module_spec)
bottleneck_tensor = m(resized_input_tensor)
wants_quantization = any(node.op in FAKE_QUANT_OPS
for node in graph.as_graph_def().node)
return graph, bottleneck_tensor, resized_input_tensor, wants_quantization
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
decoded_image_tensor: Output of initial image resizing and preprocessing.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
# First decode the JPEG image, resize it, and rescale the pixel values.
resized_input_values = sess.run(decoded_image_tensor,
{image_data_tensor: image_data})
# Then run it through the recognition network.
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: resized_input_values})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Create a single bottleneck file."""
tf.logging.info('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index,
image_dir, category)
if not tf.gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = tf.gfile.FastGFile(image_path, 'rb').read()
try:
bottleneck_values = run_bottleneck_on_image(
sess, image_data, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor)
except Exception as e:
raise RuntimeError('Error during processing file %s (%s)' % (image_path,
str(e)))
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, module_name):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The output tensor for the bottleneck values.
module_name: The name of the image module being used.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category, module_name)
if not os.path.exists(bottleneck_path):
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
except ValueError:
tf.logging.warning('Invalid float found, recreating bottleneck')
did_hit_error = True
if did_hit_error:
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# Allow exceptions to propagate here, since they shouldn't happen after a
# fresh creation
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: OrderedDict of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
module_name: The name of the image module being used.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(
sess, image_lists, label_name, index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
tf.logging.info(
str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, module_name):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
module_name: The name of the image module being used.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
bottlenecks.append(bottleneck)
ground_truths.append(label_index)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
bottlenecks.append(bottleneck)
ground_truths.append(label_index)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not tf.gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = tf.gfile.FastGFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
# running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: distorted_image_data})
bottleneck_values = np.squeeze(bottleneck_values)
bottlenecks.append(bottleneck_values)
ground_truths.append(label_index)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness, module_spec):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
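As a concrete illustration (numbers chosen only for this example): with a
224x224 input, random_crop=20 and random_scale=30, the image is first resized
so that each side lies between 1.2x and 1.56x of 224 pixels, and a 224x224
window is then cropped from a random position inside that enlarged image.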
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
module_spec: The hub.ModuleSpec for the image module being used.
Returns:
The jpeg input layer and the distorted result tensor.
"""
input_height, input_width = hub.get_expected_image_size(module_spec)
input_depth = hub.get_num_image_channels(module_spec)
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
# Convert from full range of uint8 to range [0,1] of float32.
decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,
tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(shape=[],
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, input_width)
precrop_height = tf.multiply(scale_value, input_height)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, axis=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[input_height, input_width, input_depth])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(shape=[],
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
return jpeg_data, distort_result
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor,
quantize_layer, is_training):
"""Adds a new softmax and fully-connected layer for training and eval.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://www.tensorflow.org/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
quantize_layer: Boolean, specifying whether the newly added layer should be
instrumented for quantization with TF-Lite.
is_training: Boolean, specifying whether the newly added layer is for training
or eval.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
batch_size, bottleneck_tensor_size = bottleneck_tensor.get_shape().as_list()
assert batch_size is None, 'We want to work with arbitrary batch size.'
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor,
shape=[batch_size, bottleneck_tensor_size],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(
tf.int64, [batch_size], name='GroundTruthInput')
# Organizing the following ops so they are easier to see in TensorBoard.
layer_name = 'final_retrain_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value = tf.truncated_normal(
[bottleneck_tensor_size, class_count], stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
# The tf.contrib.quantize functions rewrite the graph in place for
# quantization. The imported model graph has already been rewritten, so upon
# calling these rewrites, only the newly added final layer will be
# transformed.
if quantize_layer:
if is_training:
tf.contrib.quantize.create_training_graph()
else:
tf.contrib.quantize.create_eval_graph()
tf.summary.histogram('activations', final_tensor)
# If this is an eval graph, we don't need to add loss ops or an optimizer.
if not is_training:
return None, None, bottleneck_input, ground_truth_input, final_tensor
with tf.name_scope('cross_entropy'):
cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(
labels=ground_truth_input, logits=logits)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
train_step = optimizer.minimize(cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(prediction, ground_truth_tensor)
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def run_final_eval(train_session, module_spec, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor,
resized_image_tensor, bottleneck_tensor):
"""Runs a final evaluation on an eval graph using the test data set.
Args:
train_session: Session for the train graph with the tensors below.
module_spec: The hub.ModuleSpec for the image module being used.
class_count: Number of classes
image_lists: OrderedDict of training images for each label.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_image_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
"""
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(train_session, image_lists,
FLAGS.test_batch_size,
'testing', FLAGS.bottleneck_dir,
FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.tfhub_module))
(eval_session, _, bottleneck_input, ground_truth_input, evaluation_step,
prediction) = build_eval_session(module_spec, class_count)
test_accuracy, predictions = eval_session.run(
[evaluation_step, prediction],
feed_dict={
bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth
})
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
(test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i]:
tf.logging.info('%70s %s' % (test_filename,
list(image_lists.keys())[predictions[i]]))
def build_eval_session(module_spec, class_count):
"""Builds an restored eval session without train operations for exporting.
Args:
module_spec: The hub.ModuleSpec for the image module being used.
class_count: Number of classes
Returns:
Eval session containing the restored eval graph.
The bottleneck input, ground truth, eval step, and prediction tensors.
"""
# If quantized, we need to create the correct eval graph for exporting.
eval_graph, bottleneck_tensor, resized_input_tensor, wants_quantization = (
create_module_graph(module_spec))
eval_sess = tf.Session(graph=eval_graph)
with eval_graph.as_default():
# Add the new layer for exporting.
(_, _, bottleneck_input,
ground_truth_input, final_tensor) = add_final_retrain_ops(
class_count, FLAGS.final_tensor_name, bottleneck_tensor,
wants_quantization, is_training=False)
# Now we need to restore the values from the training graph to the eval
# graph.
tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME)
evaluation_step, prediction = add_evaluation_step(final_tensor,
ground_truth_input)
return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input,
evaluation_step, prediction)
def save_graph_to_file(graph_file_name, module_spec, class_count):
"""Saves an graph to file, creating a valid quantized one if necessary."""
sess, _, _, _, _, _ = build_eval_session(module_spec, class_count)
graph = sess.graph
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with tf.gfile.FastGFile(graph_file_name, 'wb') as f:
f.write(output_graph_def.SerializeToString())
def prepare_file_system():
# Set up the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
if FLAGS.intermediate_store_frequency > 0:
ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)
return
def add_jpeg_decoding(module_spec):
"""Adds operations that perform JPEG decoding and resizing to the graph..
Args:
module_spec: The hub.ModuleSpec for the image module being used.
Returns:
Tensors for the node to feed JPEG data into, and the output of the
preprocessing steps.
"""
input_height, input_width = hub.get_expected_image_size(module_spec)
input_depth = hub.get_num_image_channels(module_spec)
jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
# Convert from full range of uint8 to range [0,1] of float32.
decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,
tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
resize_shape = tf.stack([input_height, input_width])
resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
resized_image = tf.image.resize_bilinear(decoded_image_4d,
resize_shape_as_int)
return jpeg_data, resized_image
def export_model(module_spec, class_count, saved_model_dir):
"""Exports model for serving.
Args:
module_spec: The hub.ModuleSpec for the image module being used.
class_count: The number of classes.
saved_model_dir: Directory in which to save exported model and variables.
"""
# The SavedModel should hold the eval graph.
sess, in_image, _, _, _, _ = build_eval_session(module_spec, class_count)
with sess.graph.as_default() as graph:
tf.saved_model.simple_save(
sess,
saved_model_dir,
inputs={'image': in_image},
outputs={'prediction': graph.get_tensor_by_name('final_result:0')},
legacy_init_op=tf.group(tf.tables_initializer(), name='legacy_init_op')
)
def main(_):
# Needed to make sure the logging output is visible.
# See https://github.com/tensorflow/tensorflow/issues/3047
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.image_dir:
tf.logging.error('Must set flag --image_dir.')
return -1
# Prepare necessary directories that can be used during training
prepare_file_system()
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
tf.logging.error('Only one valid folder of images found at ' +
FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
# Set up the pre-trained graph.
module_spec = hub.load_module_spec(FLAGS.tfhub_module)
graph, bottleneck_tensor, resized_image_tensor, wants_quantization = (
create_module_graph(module_spec))
# Add the new layer that we'll be training.
with graph.as_default():
(train_step, cross_entropy, bottleneck_input,
ground_truth_input, final_tensor) = add_final_retrain_ops(
class_count, FLAGS.final_tensor_name, bottleneck_tensor,
wants_quantization, is_training=True)
with tf.Session(graph=graph) as sess:
# Initialize all weights: for the module to their pretrained values,
# and for the newly added retraining layer to random initial values.
init = tf.global_variables_initializer()
sess.run(init)
# Set up the image decoding sub-graph.
jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(module_spec)
if do_distort_images:
# We will be applying distortions, so set up the operations we'll need.
(distorted_jpeg_data_tensor,
distorted_image_tensor) = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness, module_spec)
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
FLAGS.bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.tfhub_module)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, _ = add_evaluation_step(final_tensor, ground_truth_input)
# Merge all the summaries and write them out to the summaries_dir
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(
FLAGS.summaries_dir + '/validation')
# Create a train saver that is used to restore values into an eval graph
# when exporting models.
train_saver = tf.train.Saver()
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every
# time with distortions applied, or from the cache stored on disk.
if do_distort_images:
(train_bottlenecks,
train_ground_truth) = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
(train_bottlenecks,
train_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.tfhub_module)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run(
[merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %
(datetime.now(), i, train_accuracy * 100))
tf.logging.info('%s: Step %d: Cross entropy = %f' %
(datetime.now(), i, cross_entropy_value))
# TODO: Make this use an eval graph, to avoid quantization
# moving averages being updated by the validation set, though in
# practice this makes a negligible difference.
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.tfhub_module))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
# Store intermediate results
intermediate_frequency = FLAGS.intermediate_store_frequency
if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)
and i > 0):
# If we want to do an intermediate save, save a checkpoint of the train
# graph, to restore into the eval graph.
train_saver.save(sess, CHECKPOINT_NAME)
intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +
'intermediate_' + str(i) + '.pb')
tf.logging.info('Save intermediate result to : ' +
intermediate_file_name)
save_graph_to_file(intermediate_file_name, module_spec,
class_count)
# After training is complete, force one last save of the train checkpoint.
train_saver.save(sess, CHECKPOINT_NAME)
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
run_final_eval(sess, module_spec, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor, resized_image_tensor,
bottleneck_tensor)
# Write out the trained graph and labels with the weights stored as
# constants.
tf.logging.info('Save final result to : ' + FLAGS.output_graph)
if wants_quantization:
tf.logging.info('The model is instrumented for quantization with TF-Lite')
save_graph_to_file(FLAGS.output_graph, module_spec, class_count)
with tf.gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if FLAGS.saved_model_dir:
export_model(module_spec, class_count, FLAGS.saved_model_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='/tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--intermediate_output_graphs_dir',
type=str,
default='/tmp/intermediate_graph/',
help='Where to save the intermediate graphs.'
)
parser.add_argument(
'--intermediate_store_frequency',
type=int,
default=0,
help="""\
How many training steps between saves of an intermediate graph. If "0",
no intermediate graphs will be stored.\
"""
)
parser.add_argument(
'--output_labels',
type=str,
default='/tmp/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=4000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='/tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
parser.add_argument(
'--tfhub_module',
type=str,
default=(
'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1'),
help="""\
Which TensorFlow Hub module to use.
See https://github.com/tensorflow/hub/blob/master/docs/modules/image.md
for some publicly available ones.\
""")
parser.add_argument(
'--saved_model_dir',
type=str,
default='',
help='Where to save the exported graph.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"[email protected]"
] | |
1913f94db48508d534a35b09a05eec732f312e5f | 2be43fdc9f328895b949c92ec4e7602fbbbf2ca3 | /tests/test_feeds.py | 20932f520aba76a42d6876c404625f2a4259e97f | [] | no_license | gjxlu/feedhq | 418a75ef68de759e7f85cf8b14f827b3fd5e5f27 | 3027b192e2c6f35ebb7f821c6d64e8eca49e4c44 | refs/heads/master | 2020-12-25T02:01:24.035251 | 2013-07-16T21:20:01 | 2013-07-16T21:36:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,214 | py | # -*- coding: utf-8 -*-
import feedparser
import json
from datetime import timedelta
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.utils import timezone
from django_push.subscriber.signals import updated
from django_webtest import WebTest
from httplib2 import Response
from mock import patch
from rache import schedule_job
from feedhq.feeds.models import Category, Feed, Entry, UniqueFeed
from feedhq.feeds.tasks import update_feed
from feedhq.feeds.templatetags.feeds_tags import smart_date
from feedhq.feeds.utils import USER_AGENT
from feedhq.profiles.models import User
from feedhq.wsgi import application # noqa
from .factories import UserFactory, CategoryFactory, FeedFactory, EntryFactory
from . import test_file, responses
class WebBaseTests(WebTest):
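"""Functional tests for the feeds web UI, driven through django-webtest with
outgoing HTTP fetches mocked by patching requests.get. (Docstring added.)"""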
@patch('requests.get')
def test_welcome_page(self, get):
get.return_value = responses(304)
self.user = User.objects.create_user('testuser',
'[email protected]',
'pass')
user = UserFactory.create()
url = reverse('feeds:home')
response = self.app.get(url, user=user)
self.assertContains(response, 'Getting started')
FeedFactory.create(category__user=user, user=user)
response = self.app.get(url)
self.assertNotContains(response, 'Getting started')
def test_login_required(self):
url = reverse('feeds:home')
response = self.app.get(url, headers={'Accept': 'text/*'})
self.assertEqual(response.status_code, 200)
def test_homepage(self):
"""The homepage from a logged in user"""
user = UserFactory.create()
response = self.app.get(reverse('feeds:home'),
user=user)
self.assertContains(response, 'Home')
self.assertContains(response, user.username)
def test_unauth_homepage(self):
"""The home page from a logged-out user"""
response = self.app.get(reverse('feeds:home'))
self.assertContains(response, 'Sign in') # login required
def test_paginator(self):
user = UserFactory.create()
response = self.app.get(reverse('feeds:home', args=[5]),
user=user)
self.assertContains(response, 'Home')
def test_category(self):
user = UserFactory.create()
CategoryFactory.create(user=user, name=u'Cat yo')
url = reverse('feeds:category', args=['cat-yo'])
response = self.app.get(url, user=user)
self.assertContains(response, 'Cat yo')
@patch("requests.get")
def test_only_unread(self, get):
get.return_value = responses(304)
user = UserFactory.create()
category = CategoryFactory.create(user=user)
FeedFactory.create(category=category, user=user)
url = reverse('feeds:unread_category', args=[category.slug])
response = self.app.get(url, user=user)
self.assertContains(response, category.name)
self.assertContains(response, 'all <span class="ct">')
def test_add_category(self):
user = UserFactory.create()
url = reverse('feeds:add_category')
response = self.app.get(url, user=user)
form = response.forms['category']
response = form.submit()
self.assertFormError(response, 'form', 'name',
['This field is required.'])
form['name'] = 'New Name'
form['color'] = 'red'
response = form.submit()
self.assertRedirects(response, '/manage/')
# Re-submitting the same name fails
response = form.submit()
self.assertFormError(response, 'form', 'name',
['A category with this name already exists.'])
# Adding a category with a name generating the same slug.
# The slug will be different
form['name'] = 'New Name'
response = form.submit()
user.categories.get(slug='new-name-1')
self.assertRedirects(response, '/manage/')
# Now we add a category named 'add', which is a conflicting URL
form['name'] = 'add'
response = form.submit()
user.categories.get(slug='add-1')
self.assertRedirects(response, '/manage/')
# Add a category with non-ASCII names, slugify should cope
form['name'] = u'北京'
response = form.submit()
user.categories.get(slug='unknown')
self.assertRedirects(response, '/manage/')
form['name'] = u'北'
response = form.submit()
user.categories.get(slug='unknown-1')
self.assertRedirects(response, '/manage/')
form['name'] = u'京'
response = form.submit()
user.categories.get(slug='unknown-2')
self.assertRedirects(response, '/manage/')
def test_delete_category(self):
user = UserFactory.create()
category = CategoryFactory.create(user=user)
url = reverse('feeds:delete_category', args=[category.slug])
response = self.app.get(url, user=user)
self.assertEqual(response.status_code, 200)
self.assertEqual(Category.objects.count(), 1)
form = response.forms['delete']
response = form.submit().follow()
self.assertEqual(Category.objects.count(), 0)
@patch("requests.get")
def test_feed(self, get):
get.return_value = responses(304)
user = UserFactory.create()
feed = FeedFactory.create(category__user=user, user=user)
url = reverse('feeds:feed', args=[feed.pk])
response = self.app.get(url, user=user)
expected = (
'<a href="{0}unread/">unread <span class="ct">0</span></a>'
).format(feed.get_absolute_url())
self.assertContains(response, expected)
def test_edit_category(self):
user = UserFactory.create()
category = CategoryFactory.create(user=user)
url = reverse('feeds:edit_category', args=[category.slug])
response = self.app.get(url, user=user)
self.assertContains(response, u'Edit {0}'.format(category.name))
form = response.forms['category']
form['name'] = 'New Name'
form['color'] = 'blue'
response = form.submit().follow()
self.assertContains(response,
'New Name has been successfully updated')
@patch('requests.get')
def test_add_feed(self, get):
get.return_value = responses(304)
user = UserFactory.create()
category = CategoryFactory.create(user=user)
url = reverse('feeds:add_feed')
response = self.app.get(url, user=user)
self.assertContains(response, 'Add a feed')
form = response.forms['feed']
form['name'] = 'Lulz'
response = form.submit() # there is no URL
self.assertFormError(response, 'form', 'url',
['This field is required.'])
form['name'] = 'Bobby'
form['url'] = 'http://example.com/feed.xml'
form['category'] = category.pk
response = form.submit()
self.assertFormError(response, 'form', 'url', [
"Invalid response code from URL: HTTP 304.",
])
get.return_value = responses(200, 'categories.opml')
response = form.submit()
self.assertFormError(response, 'form', 'url', [
"This URL doesn't seem to be a valid feed.",
])
get.return_value = responses(200, 'bruno.im.png')
response = form.submit()
self.assertFormError(response, 'form', 'url', [
"This URL doesn't seem to be a valid feed.",
])
cache_key = "lock:feed_check:{0}".format(user.pk)
cache._client.set(cache_key, user.pk)
response = form.submit()
self.assertFormError(response, 'form', 'url', [
"This action can only be done one at a time.",
])
cache._client.delete(cache_key)
get.return_value = responses(200, 'brutasse.atom')
response = form.submit()
self.assertRedirects(response, '/manage/')
response.follow()
response = form.submit()
self.assertFormError(
response, 'form', 'url',
["It seems you're already subscribed to this feed."])
# Provide initial params via ?feed=foo&name=bar
response = self.app.get(url, {'feed': 'https://example.com/blog/atom',
'name': 'Some Example Blog'})
self.assertContains(response, 'value="https://example.com/blog/atom"')
self.assertContains(response, 'value="Some Example Blog"')
get.side_effect = ValueError
user.feeds.all().delete()
response = form.submit()
self.assertFormError(response, 'form', 'url',
['Error fetching the feed.'])
def test_feed_url_validation(self):
user = UserFactory.create()
category = CategoryFactory.create(user=user)
url = reverse('feeds:add_feed')
response = self.app.get(url, user=user)
form = response.forms['feed']
form['name'] = 'Test'
form['url'] = 'ftp://example.com'
form['category'] = category.pk
response = form.submit()
self.assertFormError(
response, 'form', 'url',
"Invalid URL scheme: 'ftp'. Only HTTP and HTTPS are supported.",
)
for invalid_url in ['http://localhost:8000', 'http://localhost',
'http://127.0.0.1']:
form['url'] = invalid_url
response = form.submit()
self.assertFormError(response, 'form', 'url', "Invalid URL.")
@patch("requests.get")
def test_edit_feed(self, get):
get.return_value = responses(304)
user = UserFactory.create()
feed = FeedFactory.create(user=user)
url = reverse('feeds:edit_feed', args=[feed.pk])
response = self.app.get(url, user=user)
self.assertContains(response, feed.name)
form = response.forms['feed']
form['name'] = 'New Name'
form['url'] = 'http://example.com/newfeed.xml'
get.return_value = responses(200, 'brutasse.atom')
response = form.submit().follow()
self.assertContains(response, 'New Name has been successfully updated')
cat = CategoryFactory.create(user=user)
response = self.app.get(url, user=user)
form = response.forms['feed']
form['category'] = cat.pk
response = form.submit().follow()
self.assertContains(response, 'New Name has been successfully updated')
self.assertEqual(Feed.objects.get().category_id, cat.pk)
@patch("requests.get")
def test_delete_feed(self, get):
get.return_value = responses(304)
user = UserFactory.create()
feed = FeedFactory.create(category__user=user, user=user)
url = reverse('feeds:delete_feed', args=[feed.pk])
response = self.app.get(url, user=user)
self.assertContains(response, 'Delete')
self.assertContains(response, feed.name)
self.assertEqual(Feed.objects.count(), 1)
response = response.forms['delete'].submit()
self.assertEqual(response.status_code, 302)
self.assertEqual(Feed.objects.count(), 0)
# Redirects to home so useless to test
@patch("requests.get")
def test_invalid_page(self, get):
get.return_value = responses(304)
# We need more than 25 entries
user = UserFactory.create()
FeedFactory.create(category__user=user, user=user)
url = reverse('feeds:home', args=[12000]) # that page doesn't exist
response = self.app.get(url, user=user)
self.assertContains(response, '<a href="/" class="current">')
# This is called by other tests
def _test_entry(self, from_url, user):
self.assertEqual(self.app.get(
from_url, user=user).status_code, 200)
e = Entry.objects.get(title="jacobian's django-deployment-workshop")
url = reverse('feeds:item', args=[e.pk])
response = self.app.get(url, user=user)
self.assertContains(response, "jacobian's django-deployment-workshop")
@patch('requests.get')
def test_entry(self, get):
user = UserFactory.create()
get.return_value = responses(200, 'sw-all.xml')
feed = FeedFactory.create(category__user=user, user=user)
url = reverse('feeds:home')
self._test_entry(url, user)
url = reverse('feeds:unread')
self._test_entry(url, user)
url = reverse('feeds:stars')
self._test_entry(url, user)
url = reverse('feeds:category', args=[feed.category.slug])
self._test_entry(url, user)
url = reverse('feeds:unread_category', args=[feed.category.slug])
self._test_entry(url, user)
url = reverse('feeds:feed', args=[feed.pk])
self._test_entry(url, user)
url = reverse('feeds:unread_feed', args=[feed.pk])
self._test_entry(url, user)
feed.category = None
feed.save()
self._test_entry(url, user)
@patch('requests.get')
def test_custom_ordering(self, get):
user = UserFactory.create()
get.return_value = responses(200, 'sw-all.xml')
FeedFactory.create(user=user, category__user=user)
url = reverse('feeds:unread')
response = self.app.get(url, user=user)
first_title = response.context['entries'].object_list[0].title
last_title = response.context['entries'].object_list[-1].title
user.oldest_first = True
user.save()
response = self.app.get(url, user=user)
self.assertEqual(response.context['entries'].object_list[0].title,
last_title)
self.assertEqual(response.context['entries'].object_list[-1].title,
first_title)
@patch('requests.get')
def test_last_entry(self, get):
user = UserFactory.create()
get.return_value = responses(200, 'sw-all.xml')
feed = FeedFactory.create(category__user=user, user=user)
with self.assertNumQueries(2):
update_feed(feed.url)
self.assertEqual(Feed.objects.get().unread_count,
user.entries.filter(read=False).count())
last_item = user.entries.order_by('date')[0]
url = reverse('feeds:item', args=[last_item.pk])
response = self.app.get(url, user=user)
self.assertNotContains(response, 'Next →')
def test_not_mocked(self):
with self.assertRaises(ValueError):
FeedFactory.create()
@patch("requests.get")
def test_img(self, get):
get.return_value = responses(304)
user = UserFactory.create()
feed = FeedFactory.create(category__user=user, url='http://exmpl.com',
user=user)
entry = Entry.objects.create(
feed=feed,
title="Random title",
subtitle='<img src="/favicon.png">',
link='http://example.com',
date=timezone.now(),
user=user,
)
url = reverse('feeds:item', args=[entry.pk])
response = self.app.get(url, user=user)
self.assertContains(response, 'External media is hidden')
self.assertNotContains(response,
'<img src="http://exmpl.com/favicon.png">')
self.assertEqual(Feed.objects.get(pk=feed.pk).media_safe, False)
form = response.forms['images']
response = form.submit(name='once')
self.assertContains(response, 'Always display external media')
self.assertContains(response,
'<img src="http://exmpl.com/favicon.png">')
self.assertEqual(Feed.objects.get(pk=feed.pk).media_safe, False)
form = response.forms['images']
response = form.submit(name='always')
self.assertContains(response, 'Disable external media')
self.assertContains(response,
'<img src="http://exmpl.com/favicon.png">')
self.assertEqual(Feed.objects.get(pk=feed.pk).media_safe, True)
form = response.forms['images']
response = form.submit(name='never')
self.assertNotContains(response, 'Disable external media')
self.assertEqual(Feed.objects.get(pk=feed.pk).media_safe, False)
user.allow_media = True
user.save(update_fields=['allow_media'])
response = form.submit(name='never')
self.assertFalse('images' in response.forms)
self.assertContains(response,
'<img src="http://exmpl.com/favicon.png">')
@patch("requests.get")
def test_actions(self, get):
get.return_value = responses(304)
user = UserFactory.create()
feed = FeedFactory.create(category__user=user, url='http://exmpl.com',
user=user)
entry = Entry.objects.create(
feed=feed,
title="Random title",
subtitle='Foo bar content',
link='http://example.com',
date=timezone.now(),
user=user,
)
url = reverse('feeds:item', args=[entry.pk])
response = self.app.get(url, user=user)
token = response.forms['unread'].fields['csrfmiddlewaretoken'][0].value
response = self.app.post(url, {'action': 'invalid',
'csrfmiddlewaretoken': token},
user=user)
form = response.forms['star']
response = form.submit()
self.assertTrue(Entry.objects.get().starred)
form = response.forms['star']
response = form.submit()
self.assertFalse(Entry.objects.get().starred)
user.oldest_first = True
user.save(update_fields=['oldest_first'])
form = response.forms['unread']
response = form.submit()
self.assertFalse(Entry.objects.get().read)
@patch('requests.get')
def test_opml_import(self, get):
user = UserFactory.create()
url = reverse('feeds:import_feeds')
response = self.app.get(url, user=user)
get.return_value = responses(304)
form = response.forms['import']
with open(test_file('sample.opml'), 'r') as opml_file:
form['file'] = 'sample.opml', opml_file.read()
response = form.submit().follow()
self.assertContains(response, '2 feeds have been imported')
# Re-import
with open(test_file('sample.opml'), 'r') as opml_file:
form['file'] = 'sample.opml', opml_file.read()
response = form.submit().follow()
self.assertContains(response, '0 feeds have been imported')
# Import an invalid thing
form['file'] = 'invalid', "foobar"
response = form.submit()
self.assertFormError(response, 'form', 'file', [
"This file doesn't seem to be a valid OPML file."
])
# Empty file
form['file'] = 'name', ""
response = form.submit()
self.assertFormError(response, 'form', 'file', [
"The submitted file is empty."
])
@patch('requests.get')
def test_greader_opml_import(self, get):
user = UserFactory.create()
url = reverse('feeds:import_feeds')
response = self.app.get(url, user=user)
get.return_value = responses(304)
form = response.forms['import']
with open(test_file('google-reader-subscriptions.xml'),
'r') as opml_file:
form['file'] = 'sample.opml', opml_file.read()
response = form.submit().follow()
self.assertContains(response, '1 feed has been imported')
self.assertEqual(Category.objects.count(), 0)
@patch('requests.get')
def test_categories_in_opml(self, get):
user = UserFactory.create()
url = reverse('feeds:import_feeds')
response = self.app.get(url, user=user)
self.assertEqual(response.status_code, 200)
get.return_value = responses(304)
form = response.forms["import"]
with open(test_file('categories.opml'), 'r') as opml_file:
form['file'] = 'categories.opml', opml_file.read()
response = form.submit().follow()
self.assertContains(response, '20 feeds have been imported')
self.assertEqual(user.categories.count(), 6)
with self.assertRaises(Category.DoesNotExist):
user.categories.get(name='Imported')
with self.assertRaises(Feed.DoesNotExist):
Feed.objects.get(
category__in=user.categories.all(),
name='No title',
)
for c in Category.objects.all():
c.get_absolute_url()
@patch('requests.get')
def test_dashboard(self, get):
get.return_value = responses(304)
user = UserFactory.create()
url = reverse('feeds:dashboard')
FeedFactory.create(category=None, user=user)
for i in range(5):
FeedFactory.create(category__user=user, user=user)
response = self.app.get(url, user=user)
self.assertContains(response, 'Dashboard')
@patch('requests.get')
def test_unread_count(self, get):
"""Unread feed count everywhere"""
user = UserFactory.create()
url = reverse('profile')
response = self.app.get(url, user=user)
self.assertContains(
response,
'<a class="unread" title="Unread entries" href="/unread/">0</a>'
)
get.return_value = responses(200, 'sw-all.xml')
FeedFactory.create(category__user=user, user=user)
response = self.app.get(url, user=user)
self.assertContains(
response,
'<a class="unread" title="Unread entries" href="/unread/">30</a>'
)
@patch('requests.get')
def test_mark_as_read(self, get):
get.return_value = responses(304)
user = UserFactory.create()
feed = FeedFactory.create(category__user=user, user=user)
url = reverse('feeds:unread')
response = self.app.get(url, user=user)
self.assertNotContains(response, '"Mark all as read"')
get.return_value = responses(200, 'sw-all.xml')
update_feed(feed.url)
response = self.app.get(url, user=user)
self.assertContains(response, '"Mark all as read"')
form = response.forms['read']
response = form.submit()
self.assertRedirects(response, url)
response = response.follow()
self.assertContains(response, '30 entries have been marked as read')
self.assertEqual(user.entries.filter(read=False).count(), 0)
self.assertEqual(user.entries.filter(read=True).count(), 30)
form = response.forms['undo']
response = form.submit()
self.assertRedirects(response, url)
response = response.follow()
self.assertContains(response, "30 entries have been marked as unread")
self.assertEqual(user.entries.filter(read=False).count(), 30)
self.assertEqual(user.entries.filter(read=True).count(), 0)
@patch('requests.get')
def test_promote_html_content_type(self, get):
get.return_value = responses(200, 'content-description.xml')
feed = FeedFactory.create()
self.assertEqual(
len(feed.entries.all()[0].content.split('Février 1953')), 2)
@patch('requests.get')
@patch('oauth2.Client')
def test_add_to_readability(self, Client, get): # noqa
client = Client.return_value
r = Response({
'status': 202,
'reason': 'Accepted',
'location': '/api/rest/v1/bookmarks/119',
'x-article-location': '/api/rest/v1/articles/xj28dwkx',
})
value = json.dumps({'article': {'id': 'foo'}})
client.request.return_value = [r, value]
user = UserFactory.create(
read_later='readability',
read_later_credentials=json.dumps({
'oauth_token': 'token',
'oauth_token_secret': 'token secret',
}),
)
get.return_value = responses(200, 'sw-all.xml')
feed = FeedFactory.create(category__user=user, user=user)
get.assert_called_with(
feed.url,
headers={'User-Agent': USER_AGENT % '1 subscriber',
'Accept': feedparser.ACCEPT_HEADER}, timeout=10)
entry_pk = Entry.objects.all()[0].pk
url = reverse('feeds:item', args=[entry_pk])
response = self.app.get(url, user=user)
self.assertContains(response, "Add to Readability")
form = response.forms['read-later']
response = form.submit()
client.request.assert_called_with('/api/rest/v1/bookmarks/119',
method='GET')
self.assertEqual(Entry.objects.get(pk=entry_pk).read_later_url,
'https://www.readability.com/articles/foo')
response = self.app.get(url, user=user)
self.assertNotContains(response, "Add to Instapaper")
@patch("requests.get")
@patch('oauth2.Client')
def test_add_to_instapaper(self, Client, get): # noqa
client = Client.return_value
r = Response({'status': 200})
client.request.return_value = [
r,
json.dumps([{'type': 'bookmark', 'bookmark_id': 12345,
'title': 'Some bookmark',
'url': 'http://example.com/some-bookmark'}])
]
user = UserFactory.create(
read_later='instapaper',
read_later_credentials=json.dumps({
'oauth_token': 'token',
'oauth_token_secret': 'token secret',
}),
)
get.return_value = responses(304)
feed = FeedFactory.create(category__user=user, user=user)
get.return_value = responses(200, 'sw-all.xml')
update_feed(feed.url)
get.assert_called_with(
feed.url,
headers={'User-Agent': USER_AGENT % '1 subscriber',
'Accept': feedparser.ACCEPT_HEADER}, timeout=10)
entry_pk = Entry.objects.all()[0].pk
url = reverse('feeds:item', args=[entry_pk])
response = self.app.get(url, user=user)
self.assertContains(response, "Add to Instapaper")
form = response.forms['read-later']
response = form.submit()
body = 'url=http%3A%2F%2Fsimonwillison.net%2F2010%2FMar%2F12%2Fre2%2F'
client.request.assert_called_with(
'https://www.instapaper.com/api/1/bookmarks/add',
body=body,
method='POST',
)
self.assertEqual(Entry.objects.get(pk=entry_pk).read_later_url,
'https://www.instapaper.com/read/12345')
response = self.app.get(url, user=user)
self.assertNotContains(response, "Add to Instapaper")
@patch('requests.get')
@patch('requests.post')
def test_add_to_readitlaterlist(self, post, get):
user = UserFactory.create(
read_later='readitlater',
read_later_credentials=json.dumps({'username': 'foo',
'password': 'bar'}),
)
get.return_value = responses(200, 'sw-all.xml')
feed = FeedFactory.create(category__user=user, user=user)
get.assert_called_with(
feed.url,
headers={'User-Agent': USER_AGENT % '1 subscriber',
'Accept': feedparser.ACCEPT_HEADER}, timeout=10)
url = reverse('feeds:item', args=[Entry.objects.all()[0].pk])
response = self.app.get(url, user=user)
self.assertContains(response, 'Add to Read it later')
form = response.forms['read-later']
response = form.submit()
# Read it Later doesn't provide the article URL so we can't display a
# useful link
self.assertContains(response, "added to your reading list")
post.assert_called_with(
'https://readitlaterlist.com/v2/add',
data={u'username': u'foo',
'url': u'http://simonwillison.net/2010/Mar/12/re2/',
'apikey': 'test read it later API key',
u'password': u'bar',
'title': (u'RE2: a principled approach to regular '
u'expression matching')},
)
@patch('requests.get')
def test_pubsubhubbub_handling(self, get):
user = UserFactory.create()
url = 'http://bruno.im/atom/tag/django-community/'
get.return_value = responses(304)
feed = FeedFactory.create(url=url, category__user=user, user=user)
get.assert_called_with(
url, headers={'User-Agent': USER_AGENT % '1 subscriber',
'Accept': feedparser.ACCEPT_HEADER},
timeout=10)
self.assertEqual(feed.entries.count(), 0)
path = test_file('bruno.im.atom')
with open(path, 'r') as f:
data = f.read()
updated.send(sender=None, notification=data, request=None, links=None)
self.assertEqual(feed.entries.count(), 5)
# Check content handling
for entry in feed.entries.all():
self.assertTrue(len(entry.subtitle) > 2400)
# Check date handling
self.assertEqual(feed.entries.filter(date__year=2011).count(), 3)
self.assertEqual(feed.entries.filter(date__year=2012).count(), 2)
@patch('requests.get')
def test_missing_links(self, get):
path = test_file('no-rel.atom')
with open(path, 'r') as f:
data = f.read()
updated.send(sender=None, notification=data, request=None, links=None)
@patch('requests.get')
def test_link_headers(self, get):
user = UserFactory.create()
url = 'foo'
get.return_value = responses(304)
feed = FeedFactory.create(url=url, category__user=user, user=user)
path = test_file('no-rel.atom')
with open(path, 'r') as f:
data = f.read()
updated.send(sender=None, notification=data, request=None,
links=[{'url': 'foo', 'rel': 'self'}])
self.assertEqual(feed.entries.count(), 1)
@patch('requests.get')
def test_subscribe_url(self, get):
get.return_value = responses(304)
user = UserFactory.create()
c = CategoryFactory.create(user=user)
url = reverse('feeds:subscribe')
response = self.app.get(url, {'feeds': "http://bruno.im/atom/latest/"},
user=user)
self.assertContains(response, 'value="http://bruno.im/atom/latest/"')
form = response.forms['subscribe']
response = form.submit()
self.assertContains(response, 'This field is required.', 1)
form['form-0-name'] = "Bruno's awesome blog"
form['form-0-category'] = c.pk
self.assertEqual(Feed.objects.count(), 0)
response = form.submit().follow()
self.assertEqual(Feed.objects.count(), 1)
form['form-0-name'] = ""
form['form-0-category'] = ""
form['form-0-subscribe'] = False
response = form.submit().follow()
self.assertContains(response, '0 feeds have been added')
form['form-0-name'] = 'Foo'
form['form-0-category'] = c.pk
form['form-0-subscribe'] = True
response = form.submit()
self.assertContains(response, "already subscribed")
UniqueFeed.objects.create(url='http://example.com/feed',
title='Awesome')
response = self.app.get(
url, {'feeds': ",".join(['http://bruno.im/atom/latest/',
'http://example.com/feed'])})
form = response.forms['subscribe']
self.assertEqual(form['form-0-name'].value, 'Awesome')
response = form.submit().follow()
self.assertEqual(Feed.objects.count(), 2)
def test_bookmarklet_no_feed(self):
user = UserFactory.create()
url = reverse('feeds:subscribe')
response = self.app.get(url, {'url': 'http://isitbeeroclock.com/'},
user=user)
self.assertContains(
response, ('it looks like there are no feeds available on '
'<a href="http://isitbeeroclock.com/">'))
@patch("requests.get")
def test_relative_links(self, get):
get.return_value = responses(200, path='brutasse.atom')
user = UserFactory.create()
FeedFactory.create(category__user=user, user=user,
url='https://github.com/brutasse.atom')
entry = user.entries.all()[0]
self.assertTrue('<a href="/brutasse"' in entry.subtitle)
self.assertFalse('<a href="/brutasse"' in entry.content)
self.assertTrue(
'<a href="https://github.com/brutasse"' in entry.content)
feed = Feed(url='http://standblog.org/blog/feed/rss2')
e = Entry(feed=feed, subtitle=(
' <p><img alt=":-)" class="smiley"'
'src="/dotclear2/themes/default/smilies/smile.png" /> . </p>'
))
self.assertTrue(('src="http://standblog.org/dotclear2/themes/'
'default/smilies/smile.png"') in e.content)
@patch('requests.get')
def test_empty_subtitle(self, get):
get.return_value = responses(304)
user = UserFactory.create()
entry = EntryFactory(user=user, feed__category__user=user, subtitle='')
url = reverse('feeds:item', args=[entry.pk])
self.app.get(url, user=user)
def test_smart_date(self):
now = timezone.now()
self.assertEqual(len(smart_date(now)), 5)
if now.day != 1 and now.month != 1: # Can't test this on Jan 1st :)
now = now - timedelta(days=1)
self.assertEqual(len(smart_date(now)), 6)
now = now - timedelta(days=366)
self.assertEqual(len(smart_date(now)), 12)
@patch('requests.get')
def test_manage_feed(self, get):
get.return_value = responses(304)
user = UserFactory.create()
url = reverse('feeds:manage')
response = self.app.get(url, user=user)
self.assertContains(response, 'Manage feeds')
FeedFactory.create(user=user, category=None)
FeedFactory.create(user=user, category=None)
FeedFactory.create(user=user, category=None)
unique = UniqueFeed.objects.all()[0]
schedule_job(unique.url, schedule_in=0, backoff_factor=10,
error=UniqueFeed.NOT_A_FEED)
response = self.app.get(url, user=user)
self.assertContains(response, 'Not a valid RSS/Atom feed')
schedule_job(unique.url, schedule_in=0, error='blah')
response = self.app.get(url, user=user)
self.assertContains(response, 'Error')
unique.muted = True
unique.save()
response = self.app.get(url, user=user)
self.assertContains(response, 'Error')
| [
"[email protected]"
] | |
7dc97366d633f77383f6583d6130cf3b33f3367b | 164ae54c1aaf1cf84f339ec672e4426f53ff83ea | /app_pedidos_django/applications/usuarios/managers.py | 16b91163a44b4dadd465f2e79f4b436a4c0f39b4 | [] | no_license | JaviOlaya/Pedidos_Django | 39a28ac0eba96c16e1df54e0678dd195dbf9c96c | 8706f1f9ae114ed32bccf85036c47317eeb0b674 | refs/heads/main | 2023-06-22T04:22:56.907521 | 2021-07-17T11:46:48 | 2021-07-17T11:46:48 | 382,832,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | from django.db import models
#
from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager, models.Manager):
def _create_user(self,username, email, password, is_staff, is_active, is_superuser, **extra_fields):
user = self.model(
username = username,
email=email,
is_staff=is_staff,
is_active = is_active,
is_superuser = is_superuser,
**extra_fields
)
# Hash the password before saving the user
user.set_password(password)
user.save(using=self.db)
return user
def create_user (self,username, email, password=None, **extra_fields):
return self._create_user(username, email, password, False,True,False, **extra_fields)
def create_superuser(self, username, email, password = None, **extrafields):
return self._create_user(username, email, password,True,True, True, **extrafields) | [
"[email protected]"
] | |
02594a0628527d5a727933e55884e5da274eb26f | 53832bd168b797827be77bface570a2bea8952f1 | /aidlyapi.py | 7639a37116f1edd0735568b2643646a7a52352be | [] | no_license | Bryan1010/AidlyBackend | 840bbdf290d65577a37b17cc99fc1162177e0f02 | f0fd9215374713a077460876ab3069d5d556c66f | refs/heads/master | 2021-02-09T02:36:37.825383 | 2020-11-08T04:22:38 | 2020-11-08T04:22:38 | 244,229,180 | 2 | 0 | null | 2020-11-08T04:22:40 | 2020-03-01T21:50:21 | Python | UTF-8 | Python | false | false | 274 | py | from mainapp import MAIN_APP
# from models.company import Company
from flask import request, Response
from models import db
@MAIN_APP.route("/")
def hello():
return "<h1 style='color:blue'>Hello There!</h1>"
if __name__ == "__main__":
MAIN_APP.run(host='0.0.0.0')
| [
"[email protected]"
] | |
c114e9e4c5fbe43f5efbc36d8ddc04c35dd32490 | af82475dc7eb45c478414372c222e7b6016359d4 | /python书籍/Python For Finance Code/Code of Python For Finance/4375OS_08_Code/4375OS_08_12_Series.py | f279f6cc3587504d87af31fda1b21a119cea0200 | [] | no_license | enfangzhong/PythonBaseCode | 8f58c8b817eb9f4b0f0a5be437a52d5b5fab3433 | 9ab4a578b2692fdbb6aeeacb310251d51f72e953 | refs/heads/master | 2020-05-17T16:26:02.598344 | 2019-04-27T20:49:40 | 2019-04-27T20:49:40 | 183,817,172 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | """
Name : 4375OS_08_12_Series.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/26/2013
email : [email protected]
[email protected]
"""
import pandas as pd
from numpy.random import randn
x = pd.date_range('1/1/2013', periods=252)
data = pd.Series(randn(len(x)), index=x)
print data.head()
print data.tail()
| [
"[email protected]"
] | |
1f0bf5a3dd27a4dd078d0aad47999b7ab0f0ac0c | ec038a8ab77ce2e62316d91ef06cab6c5d02256a | /MPSI/CH02/TP.py | 23dc4c70d57d42e647f884c44eba4939a317f51e | [] | no_license | brahimbakkas/cpge | b4d9a9fe4b54dde2e4b5d40406aa3e6dcac59cc9 | 632f6da5c15806cf6aa48b4787c716bb73481d24 | refs/heads/master | 2020-05-26T01:44:32.468383 | 2019-10-25T23:08:30 | 2019-10-25T23:08:30 | 188,064,895 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | #
#
n = 10  # loop bound; any positive integer works here
for i in range(n):
    print(i)
| [
"[email protected]"
] | |
016c75557647665c5a3773b8cf354ade5c11502f | 941c912f44beff33a072e086c1f561f6cdd64626 | /LeetCode/codes/22.py | 84f0db4955118accd480b9d684a7ae03a363e1dc | [] | no_license | adreena/MyStudyCorner | 3a13a743769ed144965b767f547c16df4d0fa0dd | 355c0dbd32ad201800901f1bcc110550696bc96d | refs/heads/master | 2023-02-20T07:39:32.391421 | 2021-01-25T01:46:21 | 2021-01-25T01:46:21 | 255,104,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # time catalan numbers (2n n)*1/n
# space: Catalan(n) output strings, where Catalan(n) = binom(2n, n) / (n + 1)
class Solution:
def generateParenthesis(self, n: int) -> List[str]:
self.outputs = []
def helper(n_left, n_right, output):
if n_left == 0 and n_right == 0:
self.outputs.append(output)
else:
if n_left>0:
helper(n_left-1, n_right, output+'(')
if n_right>n_left:
helper(n_left, n_right-1, output+')')
helper(n,n,'')
return self.outputs | [
"[email protected]"
] | |
2cbf5758e46478db0c2adedb0c1158e442574bfb | 13bed5cd72c63182592297e88e3eb5acc574d5ab | /src/iis.tac | dac670e8c4dbac064e986c93116a12d8922dcb4c | [] | no_license | itsbth/itsinfoscreen | 951ddc5a772adfa27bf5258733ed20e8e3f65a36 | b6921c7734297bd49144e68e6c9ccb67b055cea8 | refs/heads/master | 2016-09-06T12:38:41.490698 | 2010-01-12T19:05:12 | 2010-01-12T19:05:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,752 | tac | import os, sys
sys.path.append(os.getcwd())
from twisted.application import service, internet
from nevow import appserver
from nevow import rend
from nevow import inevow
from nevow.static import File, Data
import json, random
from widget import WidgetManager
man = WidgetManager()
class JSONData(rend.Page):
isLeaf = True
def __init__(self, data):
self.data = json.dumps(data)
def renderHTTP(self, ctx):
request = inevow.IRequest(ctx)
request.setHeader("content-type", "text/json")
request.setHeader("content-length", str(len(self.data)))
return self.data
class Update(rend.Page):
def renderHTTP(self, ctx):
global man
return json.dumps(man.update())
class WidgetHandler(rend.Page):
def locateChild(self, ctx, segments):
name, action = segments
if action in ('html', 'js', 'css'):
return (File('./widgets/' + name + '/widget.' + action), ())
elif action == 'add':
global man
return (JSONData(man.add(name)), ())
elif action == 'remove':
pass
return (MainPage(), ())
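# Illustrative summary of the URL space wired up by WidgetHandler above and
# MainPage below (not part of the original module):
#   /update                     -> JSON from WidgetManager.update()
#   /widget/<name>/html|js|css  -> serves ./widgets/<name>/widget.<ext>
#   /widget/<name>/add          -> JSON result of WidgetManager.add(<name>)
#   /static/...                 -> files served from ./static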
class MainPage(rend.Page):
children = {'static': File('./static'), 'update': Update(), 'widget': WidgetHandler()}
def renderHTTP (self, ctx):
return 'N/A'
######################################################################
# Nevow Boilerplate
######################################################################
application = service.Application("itsinfoscreen")
port = 8080
res = MainPage()
site = appserver.NevowSite(res)
webService = internet.TCPServer(port, site)
webService.setServiceParent(application) | [
"[email protected]"
] | |
e921b0a4952a5165f6b62a4404df543360cc2a60 | 4cce9cd38bb82ee0a7bb5099b4d4db4ec4acce78 | /src/util.py | de37b5de4c7057f4adad5f3e5280faffff0db0f6 | [] | no_license | VertexToEdge/othello-server | 08f26748b577eae308517788a4e86296974c0aa0 | 06c59e6b5ac6adb4c53bb2d0eb523c86f9b1e7a5 | refs/heads/master | 2020-04-23T15:17:03.108482 | 2019-02-18T10:16:16 | 2019-02-18T10:16:16 | 171,259,893 | 1 | 0 | null | 2019-02-18T10:05:51 | 2019-02-18T10:05:51 | null | UTF-8 | Python | false | false | 478 | py | import struct
import json
def serialize(msg):
body = json.dumps(msg).encode()
msg_len = struct.pack('>L', len(body))
return msg_len + body
def deserialize(sock):
_msg_len = sock.recv(4)
if len(_msg_len) < 4:
raise ConnectionResetError
msg_len = struct.unpack('>L', _msg_len)[0]
msg_raw = sock.recv(msg_len)
while len(msg_raw) < msg_len:
msg_raw += sock.recv(msg_len - len(msg_raw))
msg = json.loads(msg_raw)
return msg
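if __name__ == '__main__':
    # Minimal round-trip sketch (illustrative only): a local socket pair stands in
    # for the real client/server connection used by the game.
    import socket
    a, b = socket.socketpair()
    a.sendall(serialize({"move": [2, 3]}))
    print(deserialize(b))  # -> {'move': [2, 3]}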
| [
"[email protected]"
] | |
087d6cf19f4743bba424fb5759ae4dfc7444dac8 | 72521bf31e438e49e6a26941e44523a9b54ef351 | /f1_robot/build/testbot_description/catkin_generated/pkg.installspace.context.pc.py | 4ac95d36922a9dbd7d67dba34464dfcfde047276 | [] | no_license | rossonet/braccio_ros | 04d8ccc8c70b62fdc28f166a8da58b4289414b5c | bde03a827f92c8c90764bd3ba606fbd2eeded8a2 | refs/heads/master | 2021-07-08T22:30:10.856911 | 2017-10-04T15:52:47 | 2017-10-04T15:52:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "testbot_description"
PROJECT_SPACE_DIR = "/home/rossonet/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
3758f1d2fc628019ffb27cc8d9c833c5cdee2c3f | f0000766ad3a5c47834a59144207e5e411749c19 | /python/get_commands.py | eb239d4f6c7e17d7adafa35df4a949945b44ba53 | [] | no_license | GitBolt/discord-slash-guide | f972f0bccf7dc0829f2934b59628a5e6de6258f4 | 886adac4b210df57af8ed41ccdac94f21a8b0ee7 | refs/heads/master | 2023-07-26T10:44:45.503744 | 2021-09-06T07:50:17 | 2021-09-06T07:50:17 | 403,006,058 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | """
This is to get all commands, not to be confused with `get_command.py`
which returns details of a particular command.
"""
import os
import requests
TOKEN = os.environ.get("TOKEN") or os.getenv("TOKEN")
HEADERS = {"Authorization": f"Bot {TOKEN}"}
def get_global_commands(application_id: int) -> str:
"""
https://discord.com/developers/docs/interactions/application-commands#get-global-application-commands
Get globally registered slash commands.
Takes the application (bot) ID as its single argument.
"""
url = f"https://discord.com/api/v9/applications/{application_id}/commands"
res = requests.get(url, headers=HEADERS)
return (
f"Response code: {res.status_code}\n"
f"Total commands: {len(res.json())}\n"
f"JSON text: {res.json()}\n"
)
def get_guild_commands(application_id: int, guild_id: int) -> str:
"""
https://discord.com/developers/docs/interactions/application-commands#get-guild-application-commands
Get guild-specific registered slash commands.
Takes the application (bot) ID and guild ID respectively as arguments.
"""
url = f"https://discord.com/api/v9/applications/{application_id}/guilds/{guild_id}/commands"
res = requests.get(url, headers=HEADERS)
return (
f"Response code: {res.status_code}\n"
f"Total commands: {len(res.json())}\n"
f"JSON text: {res.json()}\n"
)
# print(get_global_commands(884099662653562961))
# print(get_guild_commands(884099662653562961, 845726630231932980))
| [
"[email protected]"
] | |
2764e974be54c42119a3c178925548b4bf99e8df | 08e84e6190e7fa25a3948344defd73c602c168cc | /views.py | 722f56acace4cda306b93e446543d881716c2259 | [] | no_license | dimmoon69/django-cdbforms | f053bc19765e4c8ebe512feef2b55d4632bc7c8d | 58c1e3aab4455073176da7c28d23cdfd7badad94 | refs/heads/master | 2021-12-04T11:44:07.989876 | 2011-03-18T14:08:57 | 2011-03-18T14:08:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect
from django.template.loader import get_template
from django.template import Context, RequestContext
from models import *
from forms import *
def cdbform(request, template):
if request.method == 'POST':
form = CDBForm(template=template, data=request.POST)
if form.is_valid():
data = form.save()
tmpl = get_template('sample.html')
html = tmpl.render(RequestContext(request, {'data': data}))
return HttpResponse(html)
else:
form = CDBForm(template=template)
tmpl = get_template('sample.html')
html = tmpl.render(RequestContext(request, {'form': form}))
return HttpResponse(html)
| [
"fedor.tyurin@3da375b8-c7c2-11dd-94fd-9de6169c3690"
] | fedor.tyurin@3da375b8-c7c2-11dd-94fd-9de6169c3690 |
a7f65228a1a5c133e9d806213124e862f5e27193 | fab99203d20d8755a055045b5117ea8a77ab9597 | /spanningForest/txn2Phase/tools/run_once.py | f59a2b1c3f648ed2edae85039fe10d7491591f62 | [] | no_license | yuxiamit/LiTM | 2ee787a9d8b433fbfbb1219e9355a9a0512e8dab | d118033ed4dd3b42f45b85f61908015fac8b5293 | refs/heads/master | 2020-04-18T12:08:47.797136 | 2019-08-19T19:57:01 | 2019-08-19T19:57:01 | 167,524,994 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | batchRanges =[
#500, 700, 1000, 2000, 3000, 5000,
10000, 20000, 50000, 75000, 100000, 200000] # [100, 200, 300, 500, 700, 1000, 2000, 3000, 5000, 10000, 20000, 50000, 75000, 100000, 200000]
import os
import sys
import subprocess
trials = 10
executable = "MIS"
inputFile = "randTI"
if len(sys.argv) > 1:
# inputFile = sys.argv[1]
executable = sys.argv[1]
timeList = []
def avg(l):
return float(sum(l))/len(l)
for batchSize in batchRanges:
currentL = []
for t in range(trials):
ret = subprocess.check_output("numactl -i all -- ./" + executable + " -b " + str(batchSize) + " " + inputFile, shell=True)
time = float(ret.split('PBBS-time:')[1].strip())
currentL.append(time)
timeList.append(avg(currentL))
print batchSize, time, currentL
print timeList
resultSEQ = '''100 0.548
200 0.281
300 0.214
500 0.146
700 0.11
1000 0.0914
2000 0.0869
3000 0.0636
5000 0.0585
10000 0.0647
20000 0.0723
50000 0.127
1000000 0.641'''
resultDET = '''100 0.0562
200 0.0494
300 0.0504
500 0.0495
700 0.0535
1000 0.0496
2000 0.0533
3000 0.0511
5000 0.0559
10000 0.0602
20000 0.075
50000 0.103
1000000 0.495'''
| [
"[email protected]"
] | |
0ad5c07617722f7b2b34d3993e6f969784a7bcd9 | 80a92f0aab314b590d718e9fa602551d75a3728f | /rules.py | 6cbbc80819bf26074836953c046115143af06c0c | [] | no_license | clauszitzelsberger/Schafkopf_RL | 54b3040a1db6f01b781cd56fd6ef04b423cd0828 | 7be0515472d6b9f488deea8e63c1fc43bf9ff343 | refs/heads/master | 2020-03-20T11:05:40.410251 | 2018-10-21T09:45:26 | 2018-10-21T09:45:26 | 137,392,936 | 7 | 0 | null | 2018-06-16T18:15:58 | 2018-06-14T18:12:18 | Python | UTF-8 | Python | false | false | 4,800 | py | import numpy as np
#from random import shuffle
import random
import copy
class Rules():
def __init__(self):
self.card_number = ['siebener',
'achter',
'neuner',
'zehner',
'unter',
'ober',
'koenig',
'sau']
self.card_color = ['eichel', 'gras', 'herz', 'schellen']
self.card_scores = [0,0,0,10,2,3,4,11]
############## eichel # gras # herz # schellen #
self.cards = [[0,0], [1,0], [2,0], [3,0], #siebener
[0,1], [1,1], [2,1], [3,1], #achter
[0,2], [1,2], [2,2], [3,2], #neuner
[0,3], [1,3], [2,3], [3,3], #zehner
[0,4], [1,4], [2,4], [3,4], #unter
[0,5], [1,5], [2,5], [3,5], #ober
[0,6], [1,6], [2,6], [3,6], #koenig
[0,7], [1,7], [2,7], [3,7]] #sau
self.game_names = ['sauspiel', 'solo', 'wenz']
############# eichel # gras # herz # schellen #
self.games = [[None, None], #no game
[0,0], [1,0], [3,0], #sauspiel
[0,1], [1,1], [2,1], [3,1], #solo
[None, 2]] #wenz
self.reward_basic = [0, 20, 50, 50] # no game, sauspiel, solo, wenz
self.reward_schneider = [0, 10, 20] # normal, schneider, schneider schwarz
self.winning_thresholds = [0, 30, 60, 90, 119]
def shuffle_cards(self):
cards = copy.copy(self.cards)
#random.seed(1)
random.shuffle(cards)
return cards
def deal_cards(self, number_of_players=4):
shuffled_cards = self.shuffle_cards()
return [shuffled_cards[:8],
shuffled_cards[8:16],
shuffled_cards[16:24],
shuffled_cards[24:32]]
def name_of_cards(self, cards_list):
return [[self.card_color[color], self.card_number[number]] \
for color, number in cards_list]
def name_of_game(self, game):
color = game[0]
game_type = game[1]
if color == None:
return [None, self.game_names[game_type]]
else:
return [self.card_color[color], self.game_names[game_type]]
def get_specific_cards(self, cards_list, card=[None, None]):
if card[0] == None and card[1] == None:
return cards_list
if card[0] != None and card[1] != None:
if card in cards_list:
return card
else:
return []
if card[0] != None:
return [[color, number] for color, number in cards_list if (color in [card[0]])]
if card[1] != None:
return [[color, number] for color, number in cards_list if (number in [card[1]])]
def get_trumps(self, game, cards_list):
if self.name_of_game(game)[1] == 'sauspiel':
trump_colors = [2] #Herz
trump_numbers = [4, 5] # Unter, Ober
elif self.name_of_game(game)[1] == 'solo':
trump_colors = [game[0]]
trump_numbers = [4, 5]
else: #wenz
trump_colors = []
trump_numbers = [4]
return [[color, number] for color, number in cards_list \
if color in trump_colors or number in trump_numbers]
def get_specific_cards2(self,
cards_list,
game,
card=[None, None],
wo_trumps=True):
if wo_trumps:
cards_in_color = self.get_specific_cards(cards_list, [card[0], None])
trumps = self.get_trumps(game, cards_list)
return [cards for cards in cards_in_color \
if cards not in trumps]
else:
return self.get_specific_cards(cards_list, card)
def get_index(self, item, type):
"""
Get the index of an item in either the cards or the games list.
Params:
item: the card or game to look up
type: 'card' to search the cards list; any other value searches the games list
"""
if type == 'card':
return self.cards.index(item)
else:
return self.games.index(item)
def get_one_hot_cards(self, cards_list_indexed):
number_cards = len(self.cards)
return_ = [int(i in cards_list_indexed) for i in range(number_cards)]
return return_
def get_one_hot_games(self, game_indexed):
number_games = len(self.games)
return [int(i in game_indexed) for i in range(number_games)]
def get_one_hot(self, subgroup, len_group):
return [int(i in subgroup) for i in range(len_group)]
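if __name__ == '__main__':
    # Usage sketch (illustrative only): deal four hands, print the first hand in
    # readable form, and one-hot encode it over the 32-card deck.
    rules = Rules()
    hands = rules.deal_cards()
    print(rules.name_of_cards(hands[0]))
    indexed = [rules.get_index(card, 'card') for card in hands[0]]
    print(rules.get_one_hot_cards(indexed))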
| [
"[email protected]"
] | |
27cacce4b3a5ddfcd0ac9cd6fe61e916c9aee2a2 | 1210fd2bdad04eb7dfb7c015caf3aaa9a06c73d4 | /meditation/post/models.py | a9d989806902f13e2cf6e04682c7e119bd67da89 | [] | no_license | andyxdman/meditation | c719c8d3a906a29d2ea521fc14d8f093e92dc499 | 612d8c56ed1c658018b8a583d55ce712b4f2cf79 | refs/heads/master | 2020-09-22T17:54:41.528561 | 2019-12-02T15:47:30 | 2019-12-02T15:47:30 | 225,292,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,788 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Post(models.Model):
POST_DRAFT = 0
POST_NORMAL = 1
POST_HIDDEN = 2
POST_TYPE = (
(POST_DRAFT, '草稿'),
(POST_NORMAL, '正常'),
(POST_HIDDEN, '隱藏'),
)
owner = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='作者')
post_title = models.CharField(max_length=200, verbose_name='標題')
post_content = models.TextField(max_length=1024, verbose_name='內容')
category = models.ForeignKey('Category', on_delete=models.CASCADE)
tags = models.ManyToManyField('Tag')
pub_time = models.DateTimeField(auto_now_add=True, verbose_name='建立時間')
mod_time = models.DateTimeField(auto_now=True, verbose_name='更新時間')
views = models.PositiveSmallIntegerField(default=0)
def __str__(self):
return self.post_title
class Meta:
ordering = ['-pub_time']
verbose_name = verbose_name_plural = '文章'
class Category(models.Model):
category_name = models.CharField(max_length=200, verbose_name='分頖名字')
pub_time = models.DateTimeField(auto_now_add=True, verbose_name='建立時間')
mod_time = models.DateTimeField(auto_now=True, verbose_name='更新時間')
class Meta:
ordering = ['-pub_time']
verbose_name = verbose_name_plural = '分頖'
class Tag(models.Model):
tag_name = models.CharField(max_length=200, verbose_name='標籤名字')
pub_time = models.DateTimeField(auto_now_add=True, verbose_name='建立時間')
mod_time = models.DateTimeField(auto_now=True, verbose_name='更新時間')
class Meta:
ordering = ['-pub_time']
verbose_name = verbose_name_plural = '標籤' | [
"[email protected]"
] | |
abf504d987f86ea021d138b7f2ff2b1fa1df0dc8 | 2173a5432ff7d47346cdd4d45811284b117708c6 | /Python/ifstatements.py | 03085abbc1f64a069c21109d7493c2e31357778e | [] | no_license | lucasvdiepen/PythonAchievements | beac4abeac7a46920f641a2f34db9d8a4438ba2a | 25eb9f4518abb69f31c3c979e041319810deb42b | refs/heads/master | 2023-01-10T07:44:45.578549 | 2020-11-11T18:05:13 | 2020-11-11T18:05:13 | 292,278,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | varA = 5
varB = 10
if (varA == 5):
print("varA staat gelijk aan 5")
else:
print("varA staat niet gelijk aan 5")
if(varA == varB):
print("varA staat gelijk aan varB")
elif (varA < varB):
print("varA is kleiner dan varB")
elif (varA == 50):
pass  # Do nothing
else:
print("varA is toch groter dan varB")
if(False):
print("Doe dit")
varW = False
varX = True
varY = True
varZ = True
if(varW and varX or varY and varZ):
print("Print dit")
if(varW and (varX or varY) and varZ):
print("Print mij ook")
print("Einde programma") | [
"[email protected]"
] | |
a7833550c3629ae6e2e33022924a2e4d51d2dcc7 | 77653811587e0d9285380ae866952b3fcf09ae80 | /dj4e-samples/views/views/settings.py | 8bb54857eb0d6da3145fd0e858926655dc91be25 | [
"MIT",
"CC-BY-3.0"
] | permissive | natc23/SI_364_Local | 477c82a74f7ac34bcca9db33c11ea6ec1c333cd5 | e72c98bef29ee9d6cdb1b9c120152ffc007293d2 | refs/heads/master | 2020-04-24T00:37:57.639011 | 2019-04-12T01:17:48 | 2019-04-12T01:17:48 | 171,571,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | """
Django settings for views project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'csox50cw%_4qmx($epil-nwuon06&i#dfz3qx=bgwsmw4bnjrm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [ '*' ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home.apps.HomeConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'views.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'views.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
382879de143cc3ef780b3c978f986a11db227dfb | f04e9d6cc8318f3ebd5a613e6eb87a08d1da9f03 | /Lesson35_PIR_LED/code/python/Lesson35_PIR_LED.py | bd8ecd9c95bd94d580d42f45f917e6259648dbc5 | [] | no_license | adeept/Adeept_Ultimate_Starter_Kit_for_RPi | bef331c62aeb9d6bde19b8606380f7cf491979f6 | 2fc3655cd2de187d2388dd78b37e4d8b883d5c68 | refs/heads/master | 2022-11-04T10:12:15.869910 | 2020-06-20T10:01:53 | 2020-06-20T10:01:53 | 258,106,856 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import RPi.GPIO as GPIO
import time
PIR_OUT_PIN = 13 # pin11
def setup():
GPIO.setmode(GPIO.BOARD) # Numbers GPIOs by physical location
GPIO.setup(PIR_OUT_PIN, GPIO.IN) # Set BtnPin's mode is input
def destroy():
GPIO.cleanup() # Release resource
class Led:
def __init__(self, pin):
self.pin = pin # LED GPIO pin
def setup(self):
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, GPIO.HIGH)
def on(self):
"""turn on led"""
GPIO.output(self.pin, GPIO.LOW)
def off(self):
"""turn off led"""
GPIO.output(self.pin, GPIO.HIGH)
def destroy(self):
"""close GPIO pin"""
GPIO.cleanup()
if __name__ == '__main__': # Program start from here
setup()
led = Led(11)
led.setup()
try:
while True:
if GPIO.input(PIR_OUT_PIN) == GPIO.LOW:
print('...Movement not detected!')
led.off()
else:
print('Movement detected!...')
led.on()
time.sleep(0.5)
except KeyboardInterrupt: # When Ctrl+C is pressed, release the GPIO resources via destroy().
destroy()
led.destroy()
| [
"[email protected]"
] | |
7dda60f128bc4578497e8aab41bcc3504e0f2038 | 7c0b611f13346e24970ae6d59829cc11f43f7168 | /backend/api/endpoints/lobby.py | 590cc11302dc7c9abf915e1bbd756ea78be94320 | [] | no_license | dl-eric/zhao-peng-you | e7e5823762d366d63e13225b2c361179fbdb898c | 0cc5954fbd33bcd9ba8c0aae1778ce7cee477f45 | refs/heads/main | 2022-12-28T16:24:35.007146 | 2020-10-12T03:14:57 | 2020-10-12T03:14:57 | 302,989,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,780 | py | from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect
from backend.core.lobby_manager import LobbyManager
from backend.core.exceptions import SessionsFullException, LobbyNotFoundException, LobbyPlayerExistsException
router = APIRouter()
@router.get('/count')
async def get_num_lobbies():
lm = LobbyManager.get_instance()
return lm.get_num_lobbies()
@router.put('/create')
async def create_lobby():
# Ask game if we can create a new lobby
lm = LobbyManager.get_instance()
code = None
try:
code = lm.create_lobby()
except SessionsFullException:
raise HTTPException(status_code=400, detail="Can't create lobby")
# Return lobby code
return code
# Message Protocol:
# First message client sends should be the player name they want
# First message client receives should be the player name they're assigned (server sanitized)
# Name message format:
# name:<name here>:<uuid>
#
# Chat message format:
# send_chat:<message>
#
# Disconnect msg format:
# disconnect:<player_name>:<player_id>
#
# New player msg format:
# new:<new player's name>
#
# Kick player msg format:
# kick:<player being kicked name>:<player id>
#
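# Example exchange (illustrative only; the route prefix, name and UUID values are
# made up):
#   client connects to   /<lobby_code>?player_name=alice
#   server -> client     "name:alice:3f2a9c10-..."           assigned name and player id
#   client -> server     "send_chat:hi"                      broadcast as "alice: hi"
#   client -> server     "kick:bob:7d41e2aa-..."             remove another player
#   client -> server     "disconnect:alice:3f2a9c10-..."     leave the lobby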
@router.websocket('/{lobby_code}')
async def lobby_websocket(websocket: WebSocket, lobby_code: str, player_name: str = None):
lm = LobbyManager.get_instance()
lobby = None
try:
lobby = lm.get_lobby(lobby_code)
except LobbyNotFoundException:
# Websocket close logic
await websocket.close()
return
if not player_name or len(player_name) < 2:
await websocket.close()
return
(new_player_id, new_player_name) = await lobby.join_lobby(player_name)
await lobby.connect(websocket, new_player_id)
print(new_player_name, "joined lobby", lobby_code, "with id", new_player_id)
await websocket.send_text("name:" + new_player_name + ":" + str(new_player_id))
try:
while True:
message = await websocket.receive_text()
await websocket.send_text(f"Message text was: {message}")
values = message.split(':')
if values[0] == "disconnect":
try:
await lobby.disconnect(websocket, values[1], values[2])
except:
pass
elif values[0] == "send_chat":
await lobby.broadcast(new_player_name + ": " + values[1])
elif values[0] == "kick":
await lobby.kick(values[1], values[2])
except WebSocketDisconnect:
await lobby.disconnect(websocket, new_player_name, new_player_id)
print(new_player_name, "WS Disconnected from", lobby_code) | [
"[email protected]"
] | |
16929652a578a13f35466e8c785048bd1a45f63a | f9a2ac4593462c669ec2cf996a01d3706c6bb955 | /bookmarks/views.py | be9ec2b1390839acb139afc4935ab43cc2f95b90 | [] | no_license | rogatzkij/readhelper-be | 2287a341780509a0d76ac69b9042da9222230388 | 18fe3e4263a431c29f84d51c9136f1b620568bc3 | refs/heads/master | 2022-04-04T13:39:46.305275 | 2020-01-04T14:30:46 | 2020-01-04T14:30:46 | 225,032,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,672 | py | from django.db import IntegrityError
from django.http import JsonResponse
from django.utils.timezone import now
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from bookmarks.models import Bookmark
from bookmarks.serializer import BookmarkSerializer
from books.models import Book
# Create your views here.
class BookmarksView(APIView):
""" Просмотр и добавление закладок """
permission_classes = [permissions.IsAuthenticated, ]
def get(self, request):
"""" Получить список закладок """
# Достаем параметры из запроса
try:
book_id = int(request.GET.get("book"))
except:
return Response(status=400, data='Не правильный тип параметра book')
# Look up the requested book
try:
book = Book.objects.get(id=book_id, owner=request.user)
except:
return Response(status=404, data='Книга с таким id не найдена')
# Fetch the bookmarks for this book
bookmarks = Bookmark.objects.filter(book=book)
# Serialize and return the result in the response
serializer = BookmarkSerializer(bookmarks, many=True)
return JsonResponse({'bookmarks': serializer.data})
def post(self, request):
"""" Добавить закладку """
# Достаем параметры из запроса
try:
book_id = int(request.GET.get("book"))
position = int(request.GET.get("position"))
except:
return Response(status=400, data='Не правильный тип параметра book или position')
# Look up the requested book
try:
book = Book.objects.get(id=book_id, owner=request.user)
except:
return Response(status=404, data='Книга с таким id не найдена')
# Add the bookmark
try:
b = Bookmark(book=book, position=position, date=now())
b.save()
except IntegrityError:
return Response(status=409, data='Такая закладка уже существует')
# Fetch the bookmarks for this book
bookmarks = Bookmark.objects.filter(book=book)
# Serialize and return the result in the response
serializer = BookmarkSerializer(bookmarks, many=True)
return JsonResponse({'bookmarks': serializer.data})
def delete(self, request):
# Pull the parameters out of the request
try:
bookmark_id = int(request.GET.get("bookmark"))
except:
return Response(status=400, data='Не правильный тип параметра bookmark')
try:
bookmark = Bookmark.objects.get(id=bookmark_id)
except:
return Response(status=404, data='Закладка с таким id не найдена')
# Remember the book before deleting
book = bookmark.book
# Delete the bookmark
bookmark.delete()
# Fetch the remaining bookmarks for this book
bookmarks = Bookmark.objects.filter(book=book)
# Serialize and return the result in the response
serializer = BookmarkSerializer(bookmarks, many=True)
return JsonResponse({'bookmarks': serializer.data})
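# Illustrative requests against this view (the /bookmarks/ prefix is an assumption,
# since the project's urls.py is not shown here):
#   GET    /bookmarks/?book=1                -> {"bookmarks": [...]}
#   POST   /bookmarks/?book=1&position=120   -> adds a bookmark, returns the updated list
#   DELETE /bookmarks/?bookmark=5            -> deletes bookmark 5, returns the remaining list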
| [
"[email protected]"
] | |
7ed539393db275f2f1dd067a9bf2e75a118f4e81 | 6edcb269d4f54ea37f7f12c31169192ab13e882f | /text.py | 3518e7030baf503f62f4b6845e0c3e6f9c65fe59 | [] | no_license | Jovi007/newGit | 63f79befe801865d8132f6211422fa473e452e68 | 7b26570343d95d00b4e1dd4c05383ca8dcb43653 | refs/heads/master | 2021-01-01T04:58:04.412831 | 2016-04-20T10:10:59 | 2016-04-20T10:10:59 | 56,666,477 | 0 | 0 | null | 2016-04-20T10:10:59 | 2016-04-20T07:43:26 | Python | UTF-8 | Python | false | false | 175 | py | print 'new branch'
print 'master branch fix'
print 'fix conflict'
print 'I want master get my code,I am dev1'
print 'I am master,I have new code'
print 'fix bug ing.........'
| [
"[email protected]"
] | |
7bba2b545e38840999f2778d1537f8684c064ee8 | 946ead8406872492ca4763e96b4d6a3fb5c05a81 | /venv/Scripts/easy_install-3.7-script.py | c84b105c51d98b7171cf4562cb748e94afc3d9ac | [] | no_license | Hortmagen21/Flex | 14249ad9d8563af4807f29bce935d991ab582d8f | db8cc5a0f11af3c6678e2e3631cc296efb44f1db | refs/heads/master | 2023-01-24T14:49:59.074381 | 2020-09-21T12:27:44 | 2020-09-21T12:27:44 | 243,205,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | #!C:\Users\Max\Flex\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] | |
e1c5d4e673bec81279d06a0f9df24cc150fee5d6 | c2911877d9e892c0ffa31399af8f326b1fcc14b3 | /flaskPojects/flaskBlog/__init__.py | 97fa28ec0bd4bb636bb877ae02272222dfbc1c2e | [] | no_license | MuneebSheikh/Python-Flask- | d313955aa6fb566cd80b19b3333f45802174531c | a29ac9aedc622597f6efdc45e2bde1bf7d53860a | refs/heads/master | 2020-07-15T08:15:50.398176 | 2019-10-24T11:51:11 | 2019-10-24T11:51:11 | 205,519,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
app = Flask(__name__)
app.config['SECRET_KEY'] = '0e4cc2d780d7f2b9a1be03e1a42e84c9'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = 'login'  # name of the login view function; used by url_for() when redirecting unauthenticated users
login_manager.login_message_category = 'info'  # CSS class applied to the flash message in the HTML templates (a Bootstrap alert category)
##############################################################################
# Do not move this import because it will create errors on importing modules
from flaskBlog import routes
##############################################################################
| [
"[email protected]"
] | |
f8030a439634075910d8b4cc1c7c7a989af7d51d | c6a624ffc2b6165d8e1f1c85a45ae1fa1c2baee4 | /OJ/CodeWars/Find the odd int.py | fcd97c3bb29e11c390a08aaea3b08331fe73416b | [
"Unlicense"
] | permissive | JuChunChen/Algorithm | 41be8f8b9fd0543ef4d3e2297c3a0be81fc1433d | 8291a7ed498a0a9d763e7e50fffacf78db101a7e | refs/heads/master | 2023-04-02T21:25:08.227053 | 2021-04-10T07:57:55 | 2021-04-10T07:58:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | from functools import reduce
def find_it(seq):
    # Build a {value: occurrence-count} mapping (the reduce tallies how many
    # times each value appears in seq), then return the first value whose
    # count is odd.
    return [
        k for k, v in {
            v: (lambda val, seq: reduce(lambda r, v: r + (v == val), seq, 0)
            )(v, seq)
            for v in seq
        }.items() if v % 2 != 0
    ][0]
"""
def find_it(seq):
for i in seq:
if seq.count(i)%2!=0:
return i
"""
| [
"[email protected]"
] | |
045c865fc678eba2750f62646d81f6c24d5e15cb | 7e93b1c33045b4c03054f42b6a2b800279b12a9b | /core/cache/backends/redis/compressors/base.py | 8d9e74deabf56d11d14abf6ab944ca71c3f9526c | [
"MIT"
] | permissive | anthill-arch/framework | 6f8036980667843f2be1414850255cf6a10e2dcd | a6c238a62ae9c3fb319d12e77f7e9047aab75e8d | refs/heads/master | 2020-05-09T06:01:31.186830 | 2019-08-23T13:52:43 | 2019-08-23T13:52:43 | 180,988,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | class BaseCompressor(object):
def __init__(self, options):
self._options = options
def compress(self, value):
raise NotImplementedError
def decompress(self, value):
raise NotImplementedError
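# A minimal sketch of a concrete subclass (illustration only; it is not part of
# the original module, and the zlib backend plus the 'level' option key are
# assumptions made here for the example):
import zlib
class ZlibCompressor(BaseCompressor):
    def compress(self, value):
        # value is expected to be bytes
        return zlib.compress(value, self._options.get('level', 6))
    def decompress(self, value):
        return zlib.decompress(value)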
| [
"[email protected]"
] | |
b049e56da653b855a8ad4584671e6605341e7f9f | 50c02a07926f706a9de70b6cc2576c3daf3c441e | /Maya/Python/digital37/maya/lighting/test.py | f47a0bc42d69b2354552ea3d07de3bb02055e340 | [] | no_license | kizikay/digital37 | d24204a0ae68ef84c0778333ddaa01137f06731f | 9bcb111c013cbbd51b36f3ba32346a78349755cf | refs/heads/master | 2021-05-29T12:57:36.466534 | 2012-08-05T17:11:25 | 2012-08-05T17:11:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | import maya.cmds as cmds
import re
import os
def getShadingSG(shapesList):
    # Return the shadingEngine (shading group) nodes connected to the given shapes.
    return [sg for shape in shapesList
            for sg in (cmds.listConnections(shape, type='shadingEngine') or [])]
#def SYH_changeMapDetail(detail):
#
# noTexture = False
# pattern = "."
#
# sel = cmds.ls(sl = True)
# if(len(sel) <= 0):
# print "no object are selected"
# return
#
# cmds.select(cl = True)
#
# for s in sel:
# print s
# cmds.select(s, r = True)
# cmds.hyperShade(s, smn = True)
# temp = cmds.ls(sl = True)
# file = cmds.listConnections(temp[0], type = "file")
# if(file == None):
# noTexture = True
# else:
# noTexture = False
#
# if(not noTexture):
# texture = cmds.getAttr(file[0] + ".fileTextureName")
# print texture
# finFile = texture.replace('.','__' + detail + '.')
# print finFile
# #if os.path.isfile(finFile) or not os.path.isfile(finFile) :
# finFile = finFile.replace('\\','/')
# finFile = re.sub( ('^.*/(' + 'sourceimages' + ')'), 'sourceimages', finFile )
# cmds.setAttr(file[0] + ".fileTextureName", finFile, type = "string")
# print finFile
# print "process ok!"
#SYH_changeMapDetail("M") | [
"[email protected]@e21bde55-81c9-5408-e5b9-2d3a39b4433e"
] | [email protected]@e21bde55-81c9-5408-e5b9-2d3a39b4433e |
abc977568852cbb171525797863a63f3f63231a4 | b314b8ccfdb83d84dd54df341a3191ed7aedab08 | /text_categorizer/trainer.py | b36437de54f433634f6c2d4511dc935614a18ec9 | [] | no_license | LuisVilarBarbosa/DISS | 9dc817c18197795277a81c1ab2aa8a00f014fd4d | d276b4f3e98a811031da3971587bf15fb292fb7d | refs/heads/master | 2020-04-22T22:09:19.391734 | 2019-07-26T13:43:29 | 2019-07-26T13:44:22 | 170,697,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,644 | py | #!/usr/bin/python3
# coding=utf-8
import classifiers
import pickle_manager
from os.path import isfile
from pandas import read_excel
#from profilehooks import profile
from FeatureExtractor import FeatureExtractor
from functions import data_frame_to_document_list
from logger import logger
from Parameters import Parameters
from Preprocessor import Preprocessor
from train_test_split import train_test_split
#@profile
def main(config_filename):
logger.debug("Starting execution.")
parameters = Parameters(config_filename, training_mode=True)
if parameters.preprocessed_data:
if not isfile(parameters.excel_file) and not isfile(parameters.preprocessed_data_file):
logger.error("Please, provide a valid Excel file or a valid preprocessed data file.")
quit()
if not isfile(parameters.preprocessed_data_file) and isfile(parameters.excel_file):
logger.info("Loading Excel file.")
data_frame = read_excel(parameters.excel_file)
logger.info("Creating documents.")
docs = data_frame_to_document_list(data_frame)
logger.info("Storing generated documents.")
pickle_manager.dump_documents(docs, parameters.preprocessed_data_file)
logger.info("Preprocessing documents.")
preprocessor = Preprocessor(stanfordnlp_language_package=parameters.stanfordnlp_language_package, stanfordnlp_use_gpu=parameters.stanfordnlp_use_gpu, stanfordnlp_resources_dir=parameters.stanfordnlp_resources_dir, training_mode=parameters.training_mode)
preprocessor.preprocess(text_field=parameters.excel_column_with_text_data, preprocessed_data_file=parameters.preprocessed_data_file)
logger.info("Checking generated data.")
pickle_manager.check_data(parameters.preprocessed_data_file)
else:
if not isfile(parameters.preprocessed_data_file):
logger.error("The indicated preprocessed data file does not exist.")
quit()
logger.info("Extracting features.")
feature_extractor = FeatureExtractor(nltk_stop_words_package=parameters.nltk_stop_words_package, vectorizer_name=parameters.vectorizer, training_mode=parameters.training_mode, use_lda=parameters.use_lda, document_adjustment_code=parameters.document_adjustment_code, remove_adjectives=parameters.remove_adjectives, synonyms_file=parameters.synonyms_file, features_file=parameters.features_file)
X, y, _lemmas = feature_extractor.generate_X_y(class_field=parameters.excel_column_with_classification_data, preprocessed_data_file=parameters.preprocessed_data_file)
logger.info("Splitting dataset into training and test subsets.")
train_test_split(y, parameters.test_subset_size, parameters.preprocessed_data_file, parameters.force_subsets_regeneration)
logger.info("Running classifiers.")
p = classifiers.Pipeline(parameters.classifiers, parameters.cross_validate)
metadata = pickle_manager.get_docs_metadata(parameters.preprocessed_data_file)
training_set_indexes = metadata['training_set_indexes'].tolist()
test_set_indexes = metadata['test_set_indexes'].tolist()
assert len(training_set_indexes) == len(set(training_set_indexes))
assert len(test_set_indexes) == len(set(test_set_indexes))
for elem in feature_extractor.to_remove:
try:
training_set_indexes.remove(elem)
except ValueError:
test_set_indexes.remove(elem)
logger.info("Accuracies:")
p.start(X, y, parameters.number_of_jobs, parameters.set_num_accepted_probs, training_set_indexes, test_set_indexes, parameters.resampling)
logger.debug("Execution completed.")
| [
"[email protected]"
] | |
df6b3cdaa09073a8075e928c7ef9df25a4f7150f | 1f3a2cee3654c11586b30151fd9a3fc6fd705c0a | /deep_learning/pic.py | 5488840c452ac9667371b0542085d8a928c76634 | [] | no_license | 2020668/AI | 24ff42cefacac3f397e7a6f54dda7e9d741f2e03 | 3e1a78a6348e453b0f3e8862a784105620a22bc1 | refs/heads/master | 2020-12-09T02:03:55.311209 | 2020-02-10T15:36:26 | 2020-02-10T15:36:26 | 231,186,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import tensorflow as tf
print(tf.__version__)
# Prints the installed version, e.g. '2.0.0-alpha0'
print(tf.test.is_gpu_available())
# Prints True if the GPU build is available, which confirms the installation succeeded
| [
"[email protected]"
] | |
4a699bc62ac3bdeec338f860c09e2a05aefe9edf | 7a6c9a4e38e4c7271bddd3c51ff8fb1bfa714c87 | /4/1.py | 923a108704b6cead829194f7df0776d896c91fc0 | [] | no_license | simonsayscodes/School | bf934e2a32b01e063d5f3fa49e4a4668b566518c | 377de06267ab6744992fd4f241c64cb047ba8c26 | refs/heads/master | 2023-02-23T04:49:39.232936 | 2021-01-24T22:53:03 | 2021-01-24T22:53:03 | 292,821,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | name = "Pog" # Använd inte () när det är name
age = 18.77
print(f"Ajad {age} is really annoying")
if name == "Simon Jendman": # You forgot the colon: remember that an if statement (if name is / is not ...) must end with :
    print("And is leaving soon")
elif age == 19: # Same thing as before with the if statement, but using elif
    print("please, your character is bad")
else:
print("katt bajs") | [
"[email protected]"
] | |
4e515a7ffea62d3d0983fe8800071ecacb2849a2 | 14c712219774aef5a620cde4cd3150ac31cca512 | /jjwxc/parse_user.py | a4155212fddb99dd41ceb2e3ad23c0996cf401c5 | [
"MIT"
] | permissive | jeffmxh/spider | 8d9a6e41852c84967bb44c170927a9e6a6aa96af | bec2376a05a64185d956760d9b143424c639c33f | refs/heads/master | 2021-05-11T23:57:38.861728 | 2019-01-06T07:55:47 | 2019-01-06T07:55:47 | 117,522,604 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,527 | py | import pandas as pd
import threading
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import argparse
import re
import os
import time
import random
import logging
'''
Fetch the raw HTML text of a page.
Parameter: url, the link of the page to open.
Returns: the HTML text of the page.
'''
class Spider:
def __init__(self):
self.headers = {'user-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0'}
def get_url_soup(self, url, encoding='gbk'):
# time.sleep(3 + 3 * random.random())
response = requests.get(url, headers=self.headers)
response.encoding = encoding
soup = BeautifulSoup(response.text, 'lxml')
return soup
def get_html_text(self, url, encoding='gbk'):
try:
time.sleep(5 + 10 * random.random())
r = requests.get(url, timeout = 30)
r.raise_for_status()
r.encoding = encoding
return r.text
except:
return ""
def parse_table(self, text):
data = []
table = text.find('table')
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
return data
def text_trim(self, text):
pattern = re.compile(',|<.+?>|\\u3000')
text = pattern.sub(',', str(text))
text = re.sub(',+|,+', ',', text)
text = re.sub('^,|,$', '', text)
return text
def write_list_txt(self, data, file_name):
assert isinstance(data, list)
assert file_name.endswith('.txt')
with open(file_name, 'w') as f:
f.writelines('\n'.join(data))
def write_txt(self, data, file_name):
assert isinstance(data, str)
assert file_name.endswith('.txt')
with open(file_name, 'w') as f:
f.write(data)
def list_trim(tab):
foo = [re.sub('\s+', '_', x) for x in tab]
return '\t'.join(foo)
def get_logger():
logger = logging.getLogger('my_logger')
logger.setLevel(logging.DEBUG)
    # Create a StreamHandler that prints log messages to the console, at INFO level and above
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
    # Set the log message format
formatter = logging.Formatter('[%(levelname)-3s]%(asctime)s %(filename)s[line:%(lineno)d]:%(message)s')
ch.setFormatter(formatter)
    # Attach the handler to the logger object
logger.addHandler(ch)
return logger
def load_users(file_name):
user_root_path = 'save/users/'
if not os.path.exists(user_root_path):
os.makedirs(user_root_path)
current_users = os.listdir(user_root_path)
user_list = []
with open(file_name, 'r') as f:
for user in f.readlines():
user_list.append(user.strip())
TODO_users = list(set(user_list) - set(current_users))
return TODO_users
def parse_recent_table(soup):
data = []
table_body = soup.find('table')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [re.sub('\n', '', ele.text.strip()) for ele in cols]
data.append([ele for ele in cols if ele])
return data
def parse_user(user_id, spider):
user_root_path = 'save/users/'
if not os.path.exists(user_root_path):
os.makedirs(user_root_path)
    # Subscribed works
recent_url = 'http://www.jjwxc.net/onereader_ajax.php?readerid={}&action=show_vipServer'.format(user_id)
recent_soup = spider.get_url_soup(recent_url, encoding='utf-8')
time.sleep(0.5 + random.random())
recent_list = parse_recent_table(recent_soup)
recent_list = ['\t'.join(x) for x in recent_list][1::2]
    # Bookmarked works
star_url = 'http://www.jjwxc.net/onereader_ajax.php?readerid={}&action=show_novelsa'.format(user_id)
star_soup = spider.get_url_soup(star_url, encoding='utf-8')
time.sleep(0.5 + random.random())
star_result = parse_recent_table(star_soup)
star_result = ['\t'.join(x) for x in star_result]
    # User name
user_href = 'http://www.jjwxc.net/onereader.php?readerid={}'.format(user_id)
user_soup = spider.get_url_soup(user_href)
time.sleep(0.5 + random.random())
user_name = user_soup.find('span', attrs={"id":"favorite_reader"})['rel']
    # Save the data to disk
user_path = user_root_path + user_id + '/'
os.mkdir(user_path)
spider.write_list_txt(recent_list, file_name=user_path+'订阅.txt')
spider.write_list_txt(star_result, file_name=user_path+'收藏.txt')
spider.write_txt(user_name, file_name=user_path+'用户名.txt')
time.sleep(1 + 3 * random.random())
def main(file_path):
spider = Spider()
logger = get_logger()
TODO_users = load_users(file_path)
users_count = len(TODO_users)
for i, user in enumerate(TODO_users):
try:
parse_user(user, spider)
            logger.info('Step {} of {}, User : {} parsed successfully!'.format(i+1, users_count, user))
except:
logger.info('Step {} of {}, User : {} parsed Failed!'.format(i+1, users_count, user))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='User scraping script')
parser.add_argument('-i', '--inpath', dest='input_path', nargs='?', default='',
help='Name of the input txt file containing user list')
args = parser.parse_args()
main(args.input_path)
| [
"[email protected]"
] | |
c74e724a225f8334412b1593428a285b7c2f0acd | 69eca466d7f184e9cb100572ba1dee4264d2e0c8 | /organization/migrations/0001_initial.py | bdd91ae95d41f35a0106038179b5451172ed4734 | [] | no_license | bbkchdhry/django-crud | 1247dafe55d522994ae0d070e669a67b7be1d553 | a2724b0a6c611ae3759ae7b4fd63315a66a921c4 | refs/heads/master | 2021-08-29T23:26:33.299576 | 2017-12-15T08:37:02 | 2017-12-15T08:37:02 | 114,346,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | # Generated by Django 2.0 on 2017-12-15 07:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dept_name', models.CharField(blank=True, max_length=30, null=True)),
],
options={
'db_table': 'Department',
},
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('org_name', models.CharField(blank=True, max_length=30, null=True)),
('org_type', models.CharField(blank=True, max_length=30, null=True)),
],
options={
'db_table': 'Organization',
},
),
migrations.AddField(
model_name='department',
name='organization',
field=models.ForeignKey(db_column='organization_id', on_delete=django.db.models.deletion.CASCADE, related_name='departments', to='organization.Organization'),
),
]
| [
"[email protected]"
] | |
5de0f9c4b170ee96a23070ab179376033873edf0 | 3436a1502b344e92351c03a03a2e97e9dc0b3e00 | /chapter4/code/classifier-fast.py | 3283e6a907145eb9ea64e8a5797152456d44f9b9 | [] | no_license | Rajiv-Nayan/Machine-Learning | 0c47321c59938600f6f77ab9a6f7de5637598b9e | 3850eb7c4ce1018c92ccf6a228174599613cfba7 | refs/heads/master | 2022-11-30T04:26:21.882330 | 2020-08-11T16:09:59 | 2020-08-11T16:09:59 | 284,464,237 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,188 | py | import os
import numpy as np
from collections import Counter
from sklearn.metrics import accuracy_score
def make_Dictionary(root_dir):
all_words = []
emails = [os.path.join(root_dir,f) for f in os.listdir(root_dir)]
for mail in emails:
with open(mail) as m:
for line in m:
words = line.split()
all_words += words
dictionary = Counter(all_words)
list_to_remove = dictionary.keys()
for item in list_to_remove:
if item.isalpha() == False:
del dictionary[item]
elif len(item) == 1:
del dictionary[item]
dictionary = dictionary.most_common(3000)
return dictionary
def extract_features(mail_dir):
files = [os.path.join(mail_dir,fi) for fi in os.listdir(mail_dir)]
features_matrix = np.zeros((len(files),3000))
train_labels = np.zeros(len(files))
count = 0;
docID = 0;
for fil in files:
with open(fil) as fi:
for i,line in enumerate(fi):
if i == 2:
words = line.split()
for word in words:
wordID = 0
for i,d in enumerate(dictionary):
if d[0] == word:
wordID = i
features_matrix[docID,wordID] = words.count(word)
train_labels[docID] = 0;
filepathTokens = fil.split('/')
lastToken = filepathTokens[len(filepathTokens) - 1]
if lastToken.startswith("spmsg"):
train_labels[docID] = 1;
count = count + 1
docID = docID + 1
return features_matrix, train_labels
from sklearn.neighbors import KNeighborsClassifier
TRAIN_DIR = "../train-mails"
TEST_DIR = "../test-mails"
dictionary = make_Dictionary(TRAIN_DIR)
print "reading and processing emails from file."
features_matrix, labels = extract_features(TRAIN_DIR)
test_feature_matrix, test_labels = extract_features(TEST_DIR)
neigh = KNeighborsClassifier(n_neighbors=3)
print "Training model."
#train model
neigh.fit(features_matrix, labels)
predicted_values = neigh.predict(test_feature_matrix)
print "FINISHED classifying. accuracy score : "
print accuracy_score(test_labels, predicted_values)
| [
"[email protected]"
] | |
b70b66f6096c21bc356db17ef7a9da3c02eaf719 | f8bb2d5287f73944d0ae4a8ddb85a18b420ce288 | /python/basic/for/format_obj.py | df47968c9dd34f209133f19983ab9c23ed9b0fe2 | [] | no_license | nishizumi-lab/sample | 1a2eb3baf0139e9db99b0c515ac618eb2ed65ad2 | fcdf07eb6d5c9ad9c6f5ea539046c334afffe8d2 | refs/heads/master | 2023-08-22T15:52:04.998574 | 2023-08-20T04:09:08 | 2023-08-20T04:09:08 | 248,222,555 | 8 | 20 | null | 2023-02-02T09:03:50 | 2020-03-18T12:14:34 | C | UTF-8 | Python | false | false | 46 | py | for 変数 in オブジェクト:
処理
| [
"[email protected]"
] | |
7543cdf445cb135d685ea0c8aadce2b1123acf5d | b07208301e07076da04fde86402fbaf826a90d73 | /chapter2/Chapter_2.1.1.4 KNN.py | 40bbeb0dd79e24167282d812dcdcc51916cb6fb9 | [] | no_license | TaoStarlit/python_ML | b5baf7c90c207a215db2b311bea1f6068b30d881 | 278524509503613b71c0d66c3b533e5b0c79d8d1 | refs/heads/master | 2021-08-29T01:43:40.746184 | 2017-12-13T10:01:51 | 2017-12-13T10:01:51 | 103,505,901 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py |
# coding: utf-8
# In[1]:
# Import the iris data loader from sklearn.datasets.
from sklearn.datasets import load_iris
# Use the loader to read the data and store it in the variable iris.
iris = load_iris()
# Check the size of the data.
iris.data.shape
# In[2]:
# Read the data description. For a machine learning practitioner, this is a good habit.
print iris.DESCR
# In[3]:
# Import train_test_split from sklearn.cross_validation for splitting the data.
from sklearn.cross_validation import train_test_split
# Use train_test_split with a fixed random_state to sample 25% of the data as the test set.
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.25, random_state=33)
# In[4]:
# Import the data standardization module from sklearn.preprocessing.
from sklearn.preprocessing import StandardScaler
# Import KNeighborsClassifier, the K-nearest-neighbor classifier, from sklearn.neighbors.
from sklearn.neighbors import KNeighborsClassifier
# Standardize the training and test feature data.
ss = StandardScaler()
X_train = ss.fit_transform(X_train)
X_test = ss.transform(X_test)
# Use the K-nearest-neighbor classifier to predict the test data; the predictions are stored in y_predict.
knc = KNeighborsClassifier()
knc.fit(X_train, y_train)
y_predict = knc.predict(X_test)
# In[5]:
# Evaluate the accuracy with the model's built-in scoring function.
print 'The accuracy of K-Nearest Neighbor Classifier is', knc.score(X_test, y_test)
# In[6]:
# Use the classification_report module from sklearn.metrics for a more detailed analysis of the predictions.
from sklearn.metrics import classification_report
print classification_report(y_test, y_predict, target_names=iris.target_names)
# In[ ]:
| [
"[email protected]"
] | |
be136d34d8b79c6aa596f970cf3d0bd28b394505 | 965da23a89791f445334fd7e23f032435bf993c4 | /dturm.py | 10ce1d10771f2bee7232bbf9443070455fc39296 | [
"MIT"
] | permissive | aktech/dturmscrap | b669c9d461fa89a6d4b9ff146796025d6df2e242 | b6046daa4deec5782f98069f57c4c96262443563 | refs/heads/master | 2021-01-01T16:19:00.902853 | 2015-11-16T19:21:33 | 2015-11-16T19:21:33 | 40,501,549 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,728 | py | # DTU RM Scrapper
# Author: AMiT Kumar <[email protected]>
# Version: 0.1
import sys
sys.path.insert(0, 'libs')
import webapp2
from google.appengine.api import mail
from google.appengine.ext import db
import pickle
import logging
import cookielib
import urllib2
import mechanize
from bs4 import BeautifulSoup
# Globals
roll_no = 'ROLL_NO'
password = 'PASSWORD'
rm_url = 'http://tnp.dtu.ac.in/rm3y/login.php'
# Email Details
sender_address = '[email protected]' # This is based on your GAE ID
user_address = ['USER-EMAIL']
subject = 'DTU RM Notification'
# HTML Parser
from HTMLParser import HTMLParser
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.strict = False
self.convert_charrefs = True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
class ObjectProperty(db.BlobProperty):
# Use this property to store objects.
def validate(self, value):
try:
result = pickle.dumps(value)
return value
except pickle.PicklingError, e:
return super(ObjectProperty, self).validate(value)
def get_value_for_datastore(self, model_instance):
result = super(ObjectProperty, self).get_value_for_datastore(model_instance)
result = pickle.dumps(result)
return db.Blob(result)
def make_value_from_datastore(self, value):
try:
value = pickle.loads(str(value))
except:
pass
return super(ObjectProperty, self).make_value_from_datastore(value)
class MyEntity(db.Model):
name = db.StringProperty()
obj = ObjectProperty() # Kudos
def open_browser(url):
# Browser
br = mechanize.Browser()
# Enable cookie support for urllib2
cookiejar = cookielib.LWPCookieJar()
br.set_cookiejar(cookiejar)
# Browser options
br.set_handle_equiv(True)
# br.set_handle_gzip(True) # Experimental feature
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
# Headers
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1' ) ]
# authenticate
br.open(url)
return br
def select_form(form):
return form.attrs.get('action', None) == 'login.php'
def login(br, roll_no, password):
# Username & Password
br["stud_username"] = roll_no
br["stud_password"] = password
br.submit()
def get_news(br, announce_style=None):
soup = BeautifulSoup(br.response().read())
# announce_news_soup = soup.findAll('h4', {'style': announce_style[0]})
announce_news_soup = soup.find_all('h4', attrs = {'style' : True, 'align': False})
if not announce_news_soup:
return ['Invalid scrap']
def get_contents(s):
return s.contents
announce_news_content = map(get_contents, announce_news_soup)
all_news = map(str, announce_news_content)
all_news = map(strip_tags, all_news)
all_news = all_news[:25:]
return all_news
def add_news_id(a):
for k, v in enumerate(a):
hsh = '#'
a[k] = ('UPDATE: {}{} \n' + a[k]).format(hsh, k+1)
return a
def latest_news(all_news):
entities = MyEntity.all()
entities = entities.fetch(1)
if entities:
old_news = entities[0].obj
else:
old_news = []
logging.info("Old News Empty")
latestnews = [item for item in all_news if item not in old_news]
logging.info('old_news: %s', old_news)
latestnews = add_news_id(latestnews)
logging.info('latestnews: %s', latestnews)
return latestnews
def save_news(all_news):
db.delete(MyEntity.all(keys_only=True))
entity = MyEntity(name="all_news", obj=all_news)
entity.put()
class MainPage(webapp2.RequestHandler):
def get(self):
run_rmscrap()
self.response.write("\n Success!\n")
def run_rmscrap():
br = open_browser(rm_url)
br.select_form(predicate=select_form)
login(br, roll_no, password)
# GET News
allnews = get_news(br)
latestnews = latest_news(allnews)
if latestnews:
save_news(allnews)
logging.info('Saved News: %s', allnews)
# SEND Latest News
body = '\n'.join(latestnews)
if body:
mail.send_mail(sender_address, user_address, subject, body)
logging.info("Mail Sent!")
else:
logging.info("No Latest News Found")
app = webapp2.WSGIApplication([
('/', MainPage),
], debug=True)
| [
"[email protected]"
] | |
b1a48b404cef952e203b19b3d0a78243594f0388 | 97ccf3f73e371afd4037b664ba0c71b0b6af1db3 | /guessing_number.py | aa6edf2ce024ad5db431f145fa4fdc80d98c9d35 | [] | no_license | jaesmanalang/number-guessing | 97f33e61d0c44d3e385965c4c671ab377bb5f7b4 | 919255390e1aef6b43c748c0d0e2551ed08561f2 | refs/heads/master | 2022-08-13T15:51:05.519634 | 2020-05-17T14:51:21 | 2020-05-17T14:51:21 | 264,690,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | import random
import os
attempts = 0
game_playing = True
def play_game():
reset()
global game_playing, attempts
max_range = get_max_range()
the_number = number_to_guess(max_range)
while game_playing:
guess = player_guess(max_range)
if check_guess(guess, the_number):
game_playing = False
while True:
choice = str(input('Do you want to play again? y/n: '))
if choice.lower() == 'y':
play_game()
elif choice.lower() == 'n':
break
else:
continue
print('Thank you for playing.')
def check_guess(guess, the_number):
    global attempts
if guess == the_number:
print('You guessed it. Congratulations!')
if attempts > 1:
print(f"It took {attempts} tries.")
else:
print(f"It took only {attempts} try.")
return True
elif guess > the_number:
print('Too high!')
return False
else:
print('Too low!')
return False
def number_to_guess(max_num):
return random.randint(1, max_num)
def player_guess(max_num):
global attempts
while True:
try:
guess = int(input(f'Please guess a number between 1-{max_num}: '))
attempts += 1
except ValueError:
print('Please enter a number.')
else:
break
return guess
def get_max_range():
while True:
try:
max_number = int(input('Please enter the maximum number range: '))
except ValueError:
print('Please enter a number.')
else:
break
return max_number
def reset():
global attempts, game_playing
attempts = 0
game_playing = True
clear()
def clear():
    os.system('cls' if os.name == 'nt' else 'clear')
if __name__ == '__main__':
play_game()
| [
"[email protected]"
] | |
6524ea94820092e4a5ac123ed4c71fb81f6f7c7e | 764001a417797e484d20b90e902dce6e0cc41c97 | /code/models_single_xgb_tree.py | ce587d370ff8141143498bc47cc5fd029f25860f | [] | no_license | apex51/kaggle-home-depot-search-relevance | 39145f8e6d2f944e5e8f50e9b149bd2041d74a92 | 44b439e5ef462dcdde2cd19ff879ca9086639536 | refs/heads/master | 2021-01-19T12:22:03.853816 | 2017-03-09T14:14:11 | 2017-03-09T14:14:11 | 84,448,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,040 | py | import numpy as np
import pandas as pd
import pickle
import xgboost as xgb
# from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn.metrics import mean_squared_error
from datetime import datetime
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
PROJECT_PATH = '/Users/jianghao/Projects/mine/home_depot/'
#############################
# loading data
#############################
print '='*20
print 'loading features from pickles...'
start = datetime.now()
# load y_train from original features
# notice: original features did not drop the irrelevant features
with open(PROJECT_PATH + 'pickles/df_features.pkl') as f:
df_train, df_test = pickle.load(f)
x_train = df_train.drop(labels=['relevance'], inplace=False, axis=1)
x_test = df_test
y_train = df_train['relevance']
# load data from unigram features
with open(PROJECT_PATH + 'pickles/df_features_unigram.pkl') as f:
df_train_unigram, df_test_unigram = pickle.load(f)
# load data from bigram features
with open(PROJECT_PATH + 'pickles/df_features_bigram.pkl') as f:
df_train_bigram, df_test_bigram = pickle.load(f)
# load data from trigram features
with open(PROJECT_PATH + 'pickles/df_features_trigram.pkl') as f:
df_train_trigram, df_test_trigram = pickle.load(f)
# load data from svd features
with open(PROJECT_PATH + 'pickles/df_features_svd.pkl') as f:
df_train_svd, df_test_svd = pickle.load(f)
x_train = pd.concat((x_train, df_train_unigram, df_train_bigram, df_train_trigram, df_train_svd), axis=1)
x_test = pd.concat((x_test, df_test_unigram, df_test_bigram, df_test_trigram, df_test_svd), axis=1)
print 'done, {}'.format(datetime.now() - start)
#############################
# use hyperopt
#############################
def hyperopt_train_test(params):
print '='*20
print 'loading features from pickles...'
start = datetime.now()
# k-fold
kf = KFold(74067, n_folds=5, shuffle=True, random_state=22)
n_rounds = []
scores = []
for train_index, test_index in kf:
x_train_sp, x_test_sp = x_train.iloc[train_index], x_train.iloc[test_index]
y_train_sp, y_test_sp = y_train[train_index], y_train[test_index]
dtrain = xgb.DMatrix(x_train_sp, y_train_sp)
deval = xgb.DMatrix(x_test_sp, y_test_sp)
watchlist = [(dtrain, 'train'), (deval, 'eval')]
num_rounds = 20000
clf = xgb.train(params, dtrain, num_rounds, watchlist, early_stopping_rounds=15)
n_rounds.append(clf.best_iteration)
scores.append(clf.best_score)
eval_time = datetime.now() - start
print 'done, {}'.format(eval_time)
return {'loss': np.mean(scores),
'n_rounds': n_rounds,
'scores': scores,
'eval_time': str(eval_time)}
def f(params):
results = hyperopt_train_test(params)
return {'loss': results['loss'],
'status': STATUS_OK,
'other_stuff': {'n_rounds': results['n_rounds'],
'scores': results['scores'],
'eval_time': results['eval_time']
}
}
space = {
'task': 'regression',
'booster': 'gbtree',
'objective': 'reg:linear',
'eta': hp.quniform('eta', 0.01, 0.05, 0.01),
'gamma': hp.quniform('gamma', 0, 2, 0.1),
'min_child_weight': hp.quniform('min_child_weight', 0, 10, 1),
'max_depth': hp.quniform('max_depth', 1, 20, 1),
'subsample': hp.quniform('subsample', 0.5, 1, 0.1),
'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.1),
'silent': 1
}
trials = Trials()
best = fmin(f, space, algo=tpe.suggest, max_evals=30, trials=trials)
# xgb tree booster
# 'colsample_bytree': [0.5],
# 'eta': [0.01],
# 'gamma': [1.3],
# 'max_depth': [15.0],
# 'min_child_weight': [9.0],
# 'subsample': [0.7]},
# 'loss': 0.449447,
# 'eval_time': '0:48:26.242368',
# 'n_rounds': [954, 1287, 969, 890, 862], mean = 990
# 'scores': [0.449414, 0.446667, 0.450931, 0.452023, 0.4482]},
| [
"[email protected]"
] | |
f3409674f6082e19e2cdbb91ddc6cc1956ae779f | 9aea1b19a8681b4c6b15d628a080982fb2d98b39 | /mianJing111111/Google/Implement Queue using Stacks.py | 2e144185623f255bcbf62dc1b0ca3271002fcff4 | [] | no_license | yzl232/code_training | ee7612efc6f166742fcf48e1af715f57a624d3aa | fc165027c3d7b1fec58ebfad2f9ada275a6b8c03 | refs/heads/master | 2021-01-21T04:32:02.522931 | 2016-07-01T21:35:29 | 2016-07-01T21:35:29 | 26,989,266 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | # encoding=utf-8
'''
In this method, in en-queue operation, the new element is entered at the top of stack1. In de-queue operation, if stack2 is empty then all the elements are moved to stack2 and finally top of stack2 is returned.
enQueue(q, x)
1) Push x to stack1 (assuming size of stacks is unlimited).
deQueue(q)
1) If both stacks are empty then error.
2) If stack2 is empty
While stack1 is not empty, push everything from stack1 to stack2.
3) Pop the element from stack2 and return it.
'''
# This one has come up in Google interviews.
class queue:
def __init__(self):
self.stack1 = []
self.stack2 = []
def enqueue(self, x):
self.stack1.append(x)
def dequeue(self):
if not self.stack1 and not self.stack2: raise ValueError()
if not self.stack2:
while self.stack1: self.stack2.append(self.stack1.pop())
return self.stack2.pop() | [
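# Quick self-check of the two-stack queue described above (example added for
# illustration only): FIFO order is preserved across interleaved operations.
if __name__ == '__main__':
    q = queue()
    for n in (1, 2, 3):
        q.enqueue(n)
    print(q.dequeue())  # 1
    q.enqueue(4)
    print(q.dequeue())  # 2
    print(q.dequeue())  # 3
    print(q.dequeue())  # 4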
"[email protected]"
] | |
f933c954e217766db493055dde446e4bbc81de4e | 189e580f1525a4f15a279daacb1e62155c65542b | /metrilyx/metrilyxconfig.py | cf2fba1498ac1c594f3be18db0a7ddacc50de9b8 | [
"Apache-2.0"
] | permissive | hvyas/metrilyx-2.0 | 33c009a0c925d84e31580960d8424c4ab9a7f1be | ca3aac416298fb6ba2550dd040cdc50379ab894a | refs/heads/master | 2020-12-26T04:37:38.725817 | 2014-09-28T22:56:22 | 2014-09-28T22:56:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | import os
from datastores import jsonFromFile
_abspath = os.path.abspath(__file__)
_apphome = os.path.dirname(os.path.dirname(_abspath))
CONFIG_FILE = os.path.join(_apphome, "etc/metrilyx/metrilyx.conf")
config = jsonFromFile(CONFIG_FILE)
config["static_path"] = os.path.join(os.path.dirname(_abspath), "static")
config["schema_path"] = os.path.join(_apphome, "schemas")
| [
"[email protected]"
] | |
4a0902dc27453f1c3c75f66b24e4029df02fe5ed | 4f205fc9316eff48092f573b072b644e8f02d8bd | /src/training/mytransforms.py | a8bdbb915bba86ea0d73dbf24e6f4d62a1ae0203 | [
"Apache-2.0",
"MIT"
] | permissive | hip-satomi/microbeSEG | 017d5a0af616b4ee4652c315dea21eb4fc62b754 | 7e5455f1a27bc42ad765ec06e62c36012be71c11 | refs/heads/main | 2023-04-14T10:29:08.178419 | 2023-01-26T10:09:59 | 2023-01-26T10:09:59 | 483,291,328 | 6 | 1 | MIT | 2022-10-21T15:11:37 | 2022-04-19T14:55:12 | Python | UTF-8 | Python | false | false | 15,396 | py | import numpy as np
import random
import scipy
import torch
from imgaug import augmenters as iaa
from skimage.exposure import equalize_adapthist, rescale_intensity
from torchvision import transforms
from src.utils.utils import min_max_normalization
def augmentors(label_type, min_value, max_value):
""" Get augmentations for the training process.
:param label_type: Type of the label images, e.g., 'boundary' or 'distance'.
:type label_type: str
:param min_value: Minimum value for the min-max normalization.
:type min_value: int
    :param max_value: Maximum value for the min-max normalization.
    :type max_value: int
:return: Dict of augmentations.
"""
data_transforms = {'train': transforms.Compose([Flip(p=1.0),
Contrast(p=0.45),
Scaling(p=0.25),
Rotate(p=0.25),
Blur(p=0.3),
Noise(p=0.3),
ToTensor(label_type=label_type,
min_value=min_value,
max_value=max_value)]),
'val': ToTensor(label_type=label_type, min_value=min_value, max_value=max_value)}
return data_transforms
class Blur(object):
""" Blur augmentation (label-preserving transformation) """
def __init__(self, p=1):
"""
:param p: Probability to apply augmentation to an image.
:type p: float
"""
self.p = p
def __call__(self, sample):
"""
:param sample: Dictionary containing image and label image (numpy arrays).
:type sample: dict
:return: Dictionary containing augmented image and label image (numpy arrays).
"""
if random.random() < self.p:
sigma = random.random() + 1.0
sample['image'] = scipy.ndimage.gaussian_filter(sample['image'], sigma, order=0)
return sample
class Contrast(object):
""" Contrast augmentation (label-preserving transformation) """
def __init__(self, p=1):
"""
        :param p: Probability to apply augmentation to an image.
:type p: float
"""
self.p = p
def __call__(self, sample):
"""
:param sample: Dictionary containing image and label image (numpy arrays).
:type sample: dict
:return: Dictionary containing augmented image and label image (numpy arrays).
"""
if random.random() < self.p:
img = sample['image']
h = random.randint(0, 2)
if h == 0: # Apply CLAHE or contrast stretching
img = equalize_adapthist(np.squeeze(img), clip_limit=0.01)
img = (65535 * img[..., None]).astype(np.uint16)
elif h == 1: # Contrast stretching
if random.randint(0, 1) == 0:
p0, p1 = np.percentile(img, (0.2, 99.8))
else:
p0, p1 = np.percentile(img, (0.1, 99.9))
img = rescale_intensity(img, in_range=(p0, p1))
else: # Apply Contrast and gamma adjustment
dtype = img.dtype
img = (img.astype(np.float32) - np.iinfo(dtype).min) / (np.iinfo(dtype).max - np.iinfo(dtype).min)
contrast_range, gamma_range = (0.75, 1.25), (0.7, 1.3)
# Contrast
img_mean, img_min, img_max = img.mean(), img.min(), img.max()
factor = np.random.uniform(contrast_range[0], contrast_range[1])
img = (img - img_mean) * factor + img_mean
# Gamma
img_mean, img_std, img_min, img_max = img.mean(), img.std(), img.min(), img.max()
gamma = np.random.uniform(gamma_range[0], gamma_range[1])
rnge = img_max - img_min
img = np.power(((img - img_min) / float(rnge + 1e-7)), gamma) * rnge + img_min
img = np.clip(img, 0, 1)
img = img * (np.iinfo(dtype).max - np.iinfo(dtype).min) - np.iinfo(dtype).min
img = img.astype(dtype)
sample['image'] = img
return sample
class Flip(object):
""" Flip and rotation augmentation (label-preserving transformation). Crop needed for non-square images. """
def __init__(self, p=0.5):
"""
        :param p: Probability to apply augmentation to an image.
:type p: float
"""
self.p = p
def __call__(self, sample):
"""
:param sample: Dictionary containing image and label image (numpy arrays).
:type sample: dict
:return: Dictionary containing augmented image and label image (numpy arrays).
"""
img = sample['image']
if random.random() < self.p:
h = random.randint(0, 7)
if h == 0:
pass
elif h == 1: # Flip left-right
sample['image'] = np.flip(img, axis=1).copy()
if len(sample) == 3:
sample['label'] = np.flip(sample['label'], axis=1).copy()
elif len(sample) == 4:
sample['border_label'] = np.flip(sample['border_label'], axis=1).copy()
sample['cell_label'] = np.flip(sample['cell_label'], axis=1).copy()
elif h == 2: # Flip up-down
sample['image'] = np.flip(img, axis=0).copy()
if len(sample) == 3:
sample['label'] = np.flip(sample['label'], axis=0).copy()
elif len(sample) == 4:
sample['border_label'] = np.flip(sample['border_label'], axis=0).copy()
sample['cell_label'] = np.flip(sample['cell_label'], axis=0).copy()
elif h == 3: # Rotate 90°
sample['image'] = np.rot90(img, axes=(0, 1)).copy()
if len(sample) == 3:
sample['label'] = np.rot90(sample['label'], axes=(0, 1)).copy()
elif len(sample) == 4:
sample['border_label'] = np.rot90(sample['border_label'], axes=(0, 1)).copy()
sample['cell_label'] = np.rot90(sample['cell_label'], axes=(0, 1)).copy()
elif h == 4: # Rotate 180°
sample['image'] = np.rot90(img, k=2, axes=(0, 1)).copy()
if len(sample) == 3:
sample['label'] = np.rot90(sample['label'], k=2, axes=(0, 1)).copy()
elif len(sample) == 4:
sample['border_label'] = np.rot90(sample['border_label'], k=2, axes=(0, 1)).copy()
sample['cell_label'] = np.rot90(sample['cell_label'], k=2, axes=(0, 1)).copy()
elif h == 5: # Rotate 270°
sample['image'] = np.rot90(img, k=3, axes=(0, 1)).copy()
if len(sample) == 3:
sample['label'] = np.rot90(sample['label'], k=3, axes=(0, 1)).copy()
elif len(sample) == 4:
sample['border_label'] = np.rot90(sample['border_label'], k=3, axes=(0, 1)).copy()
sample['cell_label'] = np.rot90(sample['cell_label'], k=3, axes=(0, 1)).copy()
elif h == 6: # Flip left-right + rotate 90°
img = np.flip(img, axis=1).copy()
sample['image'] = np.rot90(img, axes=(0, 1)).copy()
if len(sample) == 3:
label_img = np.flip(sample['label'], axis=1).copy()
sample['label'] = np.rot90(label_img, k=1, axes=(0, 1)).copy()
elif len(sample) == 4:
border_label = np.flip(sample['border_label'], axis=1).copy()
cell_label = np.flip(sample['cell_label'], axis=1).copy()
sample['border_label'] = np.rot90(border_label, k=1, axes=(0, 1)).copy()
sample['cell_label'] = np.rot90(cell_label, k=1, axes=(0, 1)).copy()
elif h == 7: # Flip up-down + rotate 90°
img = np.flip(img, axis=0).copy()
sample['image'] = np.rot90(img, axes=(0, 1)).copy()
if len(sample) == 3:
label_img = np.flip(sample['label'], axis=0).copy()
sample['label'] = np.rot90(label_img, k=1, axes=(0, 1)).copy()
elif len(sample) == 4:
border_label = np.flip(sample['border_label'], axis=0).copy()
cell_label = np.flip(sample['cell_label'], axis=0).copy()
sample['border_label'] = np.rot90(border_label, k=1, axes=(0, 1)).copy()
sample['cell_label'] = np.rot90(cell_label, k=1, axes=(0, 1)).copy()
return sample
class Noise(object):
""" Gaussian noise augmentation """
def __init__(self, p=0.25):
"""
param p: Probability to apply augmentation to an image.
:type p: float
"""
self.p = p
def __call__(self, sample):
"""
:param sample: Dictionary containing image and label image (numpy arrays).
:type sample: dict
:return: Dictionary containing augmented image and label image (numpy arrays).
"""
if random.random() < self.p:
# Add noise with sigma 1-5% of image maximum
sigma = random.randint(1, 5) / 100 * np.max(sample['image'])
# Add noise to selected images
seq = iaa.Sequential([iaa.AdditiveGaussianNoise(scale=sigma, per_channel=False)])
sample['image'] = seq.augment_image(sample['image'])
return sample
class Rotate(object):
""" Rotation augmentation (label-changing augmentation) """
def __init__(self, p=1):
"""
param p: Probability to apply augmentation to an image.
:type p: float
"""
self.p = p
self.angle = (-45, 45)
def __call__(self, sample):
"""
:param sample: Dictionary containing image and label image (numpy arrays).
:type sample: dict
:return: Dictionary containing augmented image and label image (numpy arrays).
"""
if random.random() < self.p:
angle = random.uniform(self.angle[0], self.angle[1])
seq1 = iaa.Sequential([iaa.Affine(rotate=angle)]).to_deterministic()
seq2 = iaa.Sequential([iaa.Affine(rotate=angle, order=0)]).to_deterministic()
sample['image'] = seq1.augment_image(sample['image'])
if len(sample) == 3:
if sample['label'].dtype == np.uint8:
sample['label'] = seq2.augment_image(sample['label'])
else:
sample['label'] = seq1.augment_image(sample['label'])
elif len(sample) == 4:
if sample['border_label'].dtype == np.uint8:
sample['border_label'] = seq2.augment_image(sample['border_label'])
else:
sample['border_label'] = seq1.augment_image(sample['border_label'])
if sample['cell_label'].dtype == np.uint8:
sample['cell_label'] = seq2.augment_image(sample['cell_label'])
else:
sample['cell_label'] = seq1.augment_image(sample['cell_label'])
else:
raise Exception('Unsupported sample format.')
return sample
class Scaling(object):
""" Scaling augmentation (label-changing transformation) """
def __init__(self, p=1):
"""
param p: Probability to apply augmentation to an image.
:type p: float
"""
self.p = p
self.scale = (0.85, 1.15)
def __call__(self, sample):
"""
:param sample: Dictionary containing image and label image (numpy arrays).
:type sample: dict
:return: Dictionary containing augmented image and label image (numpy arrays).
"""
if random.random() < self.p:
scale1 = random.uniform(self.scale[0], self.scale[1])
scale2 = random.uniform(self.scale[0], self.scale[1])
seq1 = iaa.Sequential([iaa.Affine(scale={"x": scale1, "y": scale2})])
seq2 = iaa.Sequential([iaa.Affine(scale={"x": scale1, "y": scale2}, order=0)])
sample['image'] = seq1.augment_image(sample['image'])
if len(sample) == 3:
if sample['label'].dtype == np.uint8:
sample['label'] = seq2.augment_image(sample['label'])
else:
sample['label'] = seq1.augment_image(sample['label']).copy()
elif len(sample) == 4:
if sample['border_label'].dtype == np.uint8:
sample['border_label'] = seq2.augment_image(sample['border_label'])
else:
sample['border_label'] = seq1.augment_image(sample['border_label'])
if sample['cell_label'].dtype == np.uint8:
sample['cell_label'] = seq2.augment_image(sample['cell_label'])
else:
sample['cell_label'] = seq1.augment_image(sample['cell_label'])
else:
raise Exception('Unsupported sample format.')
return sample
class ToTensor(object):
""" Convert image and label image to Torch tensors """
def __init__(self, label_type, min_value, max_value):
"""
:param min_value: Minimum value for the normalization. All values below this value are clipped
:type min_value: int
:param max_value: Maximum value for the normalization. All values above this value are clipped.
:type max_value: int
"""
self.min_value = min_value
self.max_value = max_value
self.label_type = label_type
def __call__(self, sample):
"""
:param sample: Dictionary containing image and label image (numpy arrays).
:type sample: dict
:return: Dictionary containing augmented image and label image (numpy arrays).
"""
# Normalize image
sample['image'] = min_max_normalization(sample['image'], min_value=self.min_value, max_value=self.max_value)
# Swap axes from (H, W, Channels) to (Channels, H, W)
for key in sample:
if key != 'id':
sample[key] = np.transpose(sample[key], (2, 0, 1))
img = torch.from_numpy(sample['image']).to(torch.float)
if self.label_type == 'boundary': # loss needs long tensor with shape [batch, height, width]
label = torch.from_numpy(sample['label'])[0, :, :].to(torch.long)
return img, label
elif self.label_type == 'distance': # loss needs float tensor with shape [batch, channels, height, width]
cell_label = torch.from_numpy(sample['cell_label']).to(torch.float)
border_label = torch.from_numpy(sample['border_label']).to(torch.float)
return img, border_label, cell_label
else:
raise Exception('Unknown label type')
| [
"[email protected]"
] | |
ea876ca3108b51995f148ecb425ba1ea887579b0 | e5718bfa592ca171fd4100116ce566ecb0883e10 | /lib/to_delete/ConvDeconvClassesMnist_mods.py | 25cd606c3926001bf43fd87215e721ac80405fbe | [
"MIT"
] | permissive | joaquimlyrio/feature-visualization | e0d4eba7d9f5d3e99c917d00a6780e59fbf77ef5 | 47ead5044d1239dba1133b7ea812b2ed7f2564dc | refs/heads/master | 2021-08-30T13:03:09.928336 | 2017-12-18T03:27:05 | 2017-12-18T03:27:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,528 | py |
###
### maybe try to use tf.nn.conv2d?
###
# Imports
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import random
import math
from scipy.misc import imsave
###
### Convolutional Neural Network (CNN) for MNIST
###
class CnnMnist:
def __init__( self, session, n_in, n_out, mode ):
# instantiate session
self.session = session
self.n_in = n_in # 28*28
self.n_out = n_out # 10
self.mode = mode
# data placeholders
self.x = tf.placeholder(tf.float32, [None, n_in], name='x')
self.y = tf.placeholder(tf.float32, [None, n_out], name='y')
self.x_in = tf.reshape(self.x, [-1,self.n_in])
self.W_c1 = tf.get_variable( 'W_c1', shape = [ 5, 5, 1, 32 ] )
self.W_c2 = tf.get_variable( 'W_c2', shape = [ 5, 5, 32, 64 ] )
##
## Network Architecture
##
# Input Layer
self.input_layer = tf.reshape(self.x, [-1, 28, 28, 1])
#
# Convolutional Layer #1
#
# filter
self.conv1 = tf.nn.conv2d(
input = self.input_layer,
filter = self.W_c1,
padding = "SAME",
strides = [1,1,1,1] )
# relu
self.relu1 = tf.nn.relu( self.conv1 )
#
# Pooling Layer #1
#
self.pool1 = tf.layers.max_pooling2d(inputs=self.relu1, pool_size=[2, 2], strides=2)
#
# Convolutional Layer #2
#
# filter
self.conv2 = tf.nn.conv2d(
input = self.pool1,
filter = self.W_c2,
padding = "SAME",
strides = [1,1,1,1] )
# relu
self.relu2 = tf.nn.relu( self.conv2 )
#
# Pooling layer #2
#
self.pool2 = tf.layers.max_pooling2d(inputs=self.conv2, pool_size=[2, 2], strides=2)
#
# Dense Layer
#
pool2_flat = tf.reshape(self.pool2, [-1, 7 * 7 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training = self.mode )
# Logits Layer
self.logits = tf.layers.dense(inputs=dropout, units=10)
self.q = tf.argmax(input = self.logits, axis=1)
# Output Layer
onehot_labels = tf.one_hot( indices = tf.cast(self.y, tf.int32), depth = 10 )
self.loss = tf.nn.softmax_cross_entropy_with_logits(
labels = self.y, logits = self.logits )
self.train_step = tf.train.AdamOptimizer(1e-3).minimize(self.loss)
# method to compute y given x
def compute(self, x):
return self.session.run(self.q, feed_dict={self.x:np.reshape(x,[-1,self.n_in])})
# method to train network
def train(self, x_batch, y_batch):
# take a training step
_ = self.session.run(self.train_step, feed_dict={self.x: x_batch, self.y: y_batch})
# acessor method for output after pooling layers
def getPools(self):
return ( self.pool1, self.pool2 )
# acessor method for output after convolutional layers
def getConvs(self):
return ( self.conv1, self.conv2 )
# acessor method for loss
def getLoss(self):
return self.loss
# saver method to save trained cnn in disk
def netSaver(self, savePath):
saver = tf.train.Saver()
saver.save(self.session, savePath)
print("Model saved in file: %s" % savePath)
# loader method to restore weights of a pretrained cnn
def netLoader(self, loadPath):
loader = tf.train.Saver({"W_c1":self.W_c1, "W_c2":self.W_c2})
restoredModel= loader.restore(self.session, loadPath)
print("Model restored from %s" % loadPath)
# method to initialize filter weights
    @staticmethod
    def initWeight(shape):
weights = tf.truncated_normal(shape,stddev=0.1)
return tf.Variable(weights)
# method to instantiate deconvolutional neural net
def createDeconvNet(self, inputImage, inputLabel):
return CnnMnist.DeconvMnist( self, self.session, self.n_in, self.n_out, inputImage, inputLabel )
#''' DON'T COMMENT ME PLEASE!!!
###
### Nested Class: Deconvolutional Neural Network (CNN) for MNIST
###
class DeconvMnist:
def __init__( self, outer, session, inDim, outDim, inputImage, inputLabel ):
# data placeholders
#self.inputImage = tf.placeholder(tf.float32, [None, inDim], name='x')
#self.inputLabel = tf.placeholder(tf.float32, [None, outDim], name='y')
# instantiate outer class in inner class
self.cnn = outer
self.inDim = inDim
self.outDim = outDim
self.sess = session
activations1 = self.calculateActivations( inputImage, inputLabel, 1 )
self.deconv1 = self.deconvLayer1( inputImage, inputLabel, activations1 )
activations2 = self.calculateActivations(inputImage, inputLabel, 2)
self.deconv2 = self.deconvLayer2( inputImage, inputLabel, activations2 )
def deconvLayer1( self, inputImage, inputLabel, activations1 ):
#
## Deconvoluting 1st layer
##
# get activations for layer 1
#activations1 = self.calculateActivations( inputImage, inputLabel, 1 )
# convert from array to tensor
act1_tf = tf.convert_to_tensor( activations1, np.float32 )
# unpool
unPool1 = self.unpool( act1_tf )
# unrelu
unRelu1 = tf.nn.relu( unPool1 )
# deconvolute (filter)
unConv1 = tf.nn.conv2d_transpose( # check dimensions
#activations1,
unRelu1,
self.cnn.W_c1,
output_shape = [ inputImage.shape[0], 28, 28, 1],
strides = [1, 1, 1, 1],
padding = "SAME" )
return unConv1
def deconvLayer2( self, inputImage, inputLabel, activations2 ):
##
## Deconvoluting 2nd layer
##
# get activations for layer 2
# activations2 = self.calculateActivations(inputImage, inputLabel, 2)
# convert from array to tensor
act1_tf = tf.convert_to_tensor( activations2, np.float32 )
# 1st unpool
unPool1 = self.unpool( act1_tf )
# 1st unrelu
unRelu1 = tf.nn.relu( unPool1 )
# 1st deconvolute (filter)
unConv1 = tf.nn.conv2d_transpose(
#activations1,
unRelu1,
self.cnn.W_c2,
output_shape = [ inputImage.shape[0], 14, 14, 32],
strides = [1, 1, 1, 1],
padding = "SAME" )
# 2nd unpool
unPool2 = self.unpool( unConv1 )
# 2nd relu
unRelu2 = tf.nn.relu( unPool2 )
# 2nd deconvolute (filter)
# 1st deconvolute (filter)
unConv2 = tf.nn.conv2d_transpose(
#activations1,
unRelu2,
self.cnn.W_c1,
output_shape = [ inputImage.shape[0], 28, 28, 1],
strides = [1, 1, 1, 1],
padding = "SAME" )
return unConv2
# calculate activations for layer (1 or 2)
def calculateActivations( self, inputImage, inputLabel, layer ):
if( layer == 1 ):
return self.cnn.pool1.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,self.inDim])})
else:
return self.cnn.pool2.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,self.inDim])})
def getDeconv( self ):
return self.deconv1, self.deconv2
# method to unpool (taken from kvfrans - put link!)
def unpool( self, value ):
"""N-dimensional version of the unpooling operation from
https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf
:param value: A Tensor of shape [b, d0, d1, ..., dn, ch]
:return: A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch]
"""
#with tf.name_scope(name) as scope:
sh = value.get_shape().as_list()
dim = len(sh[1:-1])
out = (tf.reshape(value, [-1] + sh[-dim:]))
for i in range(dim, 0, -1):
out = tf.concat( [out, out], i)
out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]
out = tf.reshape(out, out_size)#, name=scope)
return out
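        # Shape illustration (informal example, not from the original code):
        # for an activation tensor of shape [batch, 7, 7, 64], unpool duplicates
        # each value along every spatial axis and returns [batch, 14, 14, 64];
        # the batch and channel dimensions are left unchanged.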
#Returns de deconvoluted layer1 as numpy array, with isolated nodes,
#and save the images on the "img" folder
def displayFeatures1( self, inputImage, inputLabel):
#
## Deconvoluting 1st layer
##
# get activations for layer 1
activations1 = self.calculateActivations( inputImage, inputLabel, 1 )
filters = activations1.shape[-1]
batch_size = activations1.shape[0]
all_isolations = np.zeros([filters, batch_size, 28, 28, 1])
for i in range(filters):
# Isolate filters
if i % 5 == 0:
print("Deconvoluting Layer 1 activation number: {}".format(i))
isolated = activations1.copy()
isolated[:,:,:,:i] = 0
isolated[:,:,:,i+1:] = 0
unConv1 = self.deconvLayer1( inputImage, inputLabel, isolated )
u = unConv1.eval()
imsave("img/Deconv1_Node_{}_of_N3.jpg".format(i), u[1,:,:,0])
all_isolations[i,:,:,:,:] = u
return all_isolations
def displayFeatures2( self, inputImage, inputLabel ):
##
## Deconvoluting 2nd layer
##
# get activations for layer 2
activations2 = self.calculateActivations(inputImage, inputLabel, 2)
filters = activations2.shape[-1]
batch_size = activations2.shape[0]
all_isolations = np.zeros([filters, batch_size, 28, 28, 1])
for i in range(filters):
# Isolate filters
if i % 5 == 0:
print("Deconvoluting Layer 2 activation number: {}".format(i))
isolated = activations2.copy()
isolated[:,:,:,:i] = 0
isolated[:,:,:,i+1:] = 0
unConv2 = self.deconvLayer2( inputImage, inputLabel, isolated )
u = unConv2.eval()
imsave("img/Deconv2_Node_{}_of_N3.jpg".format(i), u[1,:,:,0])
all_isolations[i,:,:,:,:] = u
return all_isolations
# if layer == 1:
# isolated = self.activations1.copy()
# isolated[:,:,:,:1] = 0
# isolated[:,:,:,1+1:] = 0
# return isolated
# #print("isolated shape")
# #print (np.shape(isolated))
# #totals = np.sum( isolated, axis = (1,2,3) )
# #best = np.argmin( totals, axis = 0 )
# #print (best)
# #pixelactive = self.unPool1.eval(feed_dict={self.unPool1PlaceHolder: isolated})
# #pixelactive = self.unConv1.eval(feed_dict={self.unConv1PlaceHolder: isolated[5,:,:,1]})
# else:
# # isolated = self.activations2.copy()
# # isolated[:,:,:,:1] = 0
# # isolated[:,:,:,1+1:] = 0
# # #print (np.shape(isolated))
# # totals = np.sum( isolated, axis = (1,2,3) )
# # best = np.argmin( totals, axis = 0 )
# # #print (best)
# # #pixelactive = self.unPool2.eval(feed_dict={self.unPool2PlaceHolder: isolated})
# # pixelactive = self.unConv2.eval(feed_dict={self.unConv2PlaceHolder: isolated})
# # saves pixel-representations of features from Conv layer 1
# featuresReLu1 = tf.placeholder("float",[None,32,32,32])
# unReLu = tf.nn.relu(featuresReLu1)
# unBias = unReLu
# unConv = tf.nn.conv2d_transpose(unBias, wConv1, output_shape=[batchsizeFeatures,imagesize,imagesize,colors] , strides=[1,1,1,1], padding="SAME")
# activations1 = relu1.eval(feed_dict={img: inputImage, lbl: inputLabel, keepProb: 1.0})
# print (np.shape(activations1))
# # display features
# for i in xrange(32):
# isolated = self.activations1.copy()
# isolated[:,:,:,:i] = 0
# isolated[:,:,:,i+1:] = 0
# #print (np.shape(isolated))
# totals = np.sum( isolated, axis = (1,2,3) )
# best = np.argmin( totals, axis = 0 )
# #print (best)
# pixelactive = self.unConv1.eval(feed_dict={self.unPool1PlaceHolder: isolated})
# # totals = np.sum(pixelactive,axis=(1,2,3))
# # best = np.argmax(totals,axis=0)
# # best = 0
# saveImage(pixelactive[best],"activ"+str(i)+".png")
# saveImage(inputImage[best],"activ"+str(i)+"-base.png")
# return False
#''' DON'T COMMENT ME PLEASE!!!
| [
"[email protected]"
] | |
9004f791a1dcd8b9174c7e98612c338f9f24f0b8 | 1bfab43b76b084e428e8d0ff1809afd6d20acc35 | /on luyen python/django/render_templates/render_templates/settings.py | 02ecb7968a902fe613e0f578cd8570a56debf306 | [] | no_license | namdh1985/python | 807a292c24e225b7d671fe2138b1d3d63865d191 | 35c2760326b04e264ed35342a73262e816be8798 | refs/heads/master | 2022-12-05T10:20:48.910118 | 2020-08-26T14:28:57 | 2020-08-26T14:28:57 | 287,443,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,129 | py | """
Django settings for render_templates project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5ydje*ej)h3+#6)h#jg7npz#hiha^w19=#ecys9i26*^i%k$p$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'render_templates.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'render_templates.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
e0912c5ac778e21ca948703da25cf176eec9062e | 415a2bcd0350270956e94a8e1adb90216b1293e8 | /venv/Scripts/pip-script.py | 1f44689ada4571fcbbab8ad5ec3c9978f3602acd | [] | no_license | unclet1999/Sort | 1205d0c19b7603d8960a9701e27e0c1c60ecb6ab | b620dd714d1d0b86e6fd0fecfec19de03f5a9c48 | refs/heads/master | 2020-08-23T11:29:37.636864 | 2019-10-28T18:03:01 | 2019-10-28T18:03:01 | 216,606,298 | 0 | 1 | null | 2019-10-28T19:16:43 | 2019-10-21T15:48:07 | Python | UTF-8 | Python | false | false | 408 | py | #!C:\Users\Uncle\Desktop\Sort_Project\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
003ffc8b931687487cf729b77a66c0d3d2b33116 | af41debd568a7e2701f6be440b2a60d3afb3afef | /py_tests/t_queue.py | 7cf04f164bead3a0ce1390479b5b403e501f57fd | [] | no_license | rimelis/BBB-Alarm | f948b208df2e5529445689b3bf2f21bc3df381a0 | cd3539746ec118d5556bc096f0816ad2ceb29ed4 | refs/heads/master | 2021-01-01T17:54:25.204728 | 2019-03-19T15:37:40 | 2019-03-19T15:37:40 | 98,190,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | import threading
import time
import logging
import random
import queue
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) %(message)s',)
BUF_SIZE = 10
q = queue.Queue(BUF_SIZE)
class ProducerThread(threading.Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
super(ProducerThread,self).__init__()
self.target = target
self.name = name
def run(self):
while True:
if not q.full():
item = random.randint(1,10)
q.put(item)
logging.debug('Putting ' + str(item)
+ ' : ' + str(q.qsize()) + ' items in queue')
time.sleep(random.random())
return
class ConsumerThread(threading.Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
super(ConsumerThread,self).__init__()
self.target = target
self.name = name
return
def run(self):
while True:
if not q.empty():
item = q.get()
logging.debug('Getting ' + str(item)
+ ' : ' + str(q.qsize()) + ' items in queue')
time.sleep(random.random())
return
if __name__ == '__main__':
p = ProducerThread(name='producer')
c = ConsumerThread(name='consumer')
p.start()
time.sleep(2)
c.start()
time.sleep(2)
| [
"[email protected]"
] | |
b4b48833b14eeae1819479c4994e066e45300d1c | d0dccd8b1c31c0256dca3472719acab561661aa9 | /events/views.py | 8f52985d56e7d9d48486c2516ac1ab2f8b850635 | [] | no_license | cjredmond/GrouperApp | 5fe97271bc275e570d2e3565c2bb5233ce34a79d | aba431c7def9173150e24686dbbb87685d25ed24 | refs/heads/master | 2020-03-19T21:43:12.609648 | 2018-06-29T16:17:10 | 2018-06-29T16:17:10 | 136,947,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from django.shortcuts import render
from django.views.generic import *
from django.views.generic.edit import *
from django.contrib.auth import get_user_model
from django.urls import reverse
from .models import Event
from .forms import EventCreateForm
from group.models import Entity
class EventCreateView(CreateView):
model = Event
form_class = EventCreateForm
def form_valid(self,form,**kwargs):
instance = form.save(commit=False)
instance.entity = Entity.objects.get(slug=self.kwargs['slug'])
return super().form_valid(form)
def get_success_url(self):
return reverse('landing_view')
| [
"[email protected]"
] | |
ee0c4d0093eb8fb6ee16511f21e8fcbcd1f15975 | d0b6e0994331a27e4828a42256cf2a4960e6fc77 | /src/visualization/visualize.py | fad8beca4b6d54169a1b7d0eec11dbfd9f7f9ab5 | [] | no_license | MikkelMathiasen23/mlops_cookiecutter | 08593f1475a164def363d7bf9ca231000f29efd9 | 9ec63e756257abaa793658db7d8671213cead17e | refs/heads/master | 2023-06-02T07:48:28.159788 | 2021-06-17T08:28:27 | 2021-06-17T08:28:27 | 374,989,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | import argparse
import sys
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
from sklearn.manifold import TSNE
from src.data.make_dataset import mnist
from src.models.model import MNIST_NET
parser = argparse.ArgumentParser(description='Training arguments')
parser.add_argument('--modelpath', default='models/0_checkpoint.pth')
parser.add_argument('--n_components', default=2, type=int)
args = parser.parse_args(sys.argv[2:])
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
_, testset = mnist()
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)
model = MNIST_NET().to(device)
state_dict = torch.load(args.modelpath)
model.load_state_dict(state_dict)
modules = list(model.children())[:-1]
modules2 = list(model.classifier.children())[:-4]
model = nn.Sequential(*modules)
model2 = nn.Sequential(*modules2)
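# model: the network without its final child (presumably the classifier head);
# model2: the first layers of the original classifier. Passing images through both
# yields intermediate feature vectors, which are embedded with t-SNE below.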
out = []
lab = []
with torch.no_grad():
# validation pass here
model.eval()
for batch_idx, (images, labels) in enumerate(testloader):
x = model(images)
x = x.view(x.size(0), -1)
x = model2(x)
out.append(x)
lab.append(labels)
out = torch.cat(out, dim=0)
lab = torch.cat(lab, dim=0)
tsne = TSNE(args.n_components)
x_new = tsne.fit_transform(out)
df = pd.DataFrame(lab, columns=['label'])
df['tsne-1'] = x_new[:, 0]
df['tsne-2'] = x_new[:, 1]
plt.figure(figsize=(16, 10))
sns.scatterplot(x="tsne-1",
y="tsne-2",
hue="label",
palette=sns.color_palette("hls", 10),
data=df,
legend="full",
alpha=0.3)
plt.savefig('reports/figures/tsne_features.png')
| [
"[email protected]"
] | |
e0d2036b5459a0bba69f3c1b91adeb65c9e1cccb | f1d850a59052deb004b8742a468d3f03e5d849cc | /youtap.py | e94858323194787eee1ba093a236114c4f4ccc5e | [
"Apache-2.0"
] | permissive | hubert10/youtap | 00c5bf168d6967b17400a913932e2d6e7cec45ef | 232134389336afd316f79d09689e127630d087ae | refs/heads/master | 2020-12-28T12:18:22.928153 | 2011-07-26T21:52:11 | 2011-07-26T21:52:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,812 | py | #!/usr/bin/python
#
# Copyright [2011] Sundar Srinivasan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Sundar Srinivasan ([email protected]) Twitter: @krishnasun
__author__ = ('Sundar Srinivasan')
import re
import sys
import urllib2
def getVideoUrl(content):
    fmtre = re.search('(?<=fmt_url_map=).*', content)
    if not fmtre:
        return None  # fmt_url_map missing from the page; the caller already handles a None URL
    grps = fmtre.group(0).split('&')
vurls = urllib2.unquote(grps[0])
videoUrl = None
for vurl in vurls.split('|'):
if vurl.find('itag=5') > 0:
return vurl
return None
def getTitle(content):
title = content.split('</title>', 1)[0].split('<title>')[1]
return sanitizeTitle(title)
def sanitizeTitle(rawtitle):
rawtitle = urllib2.unquote(rawtitle)
lines = rawtitle.split('\n')
title = ''
for line in lines:
san = unicode(re.sub('[^\w\s-]', '', line).strip())
san = re.sub('[-\s]+', '_', san)
title = title + san
ffr = title[:4]
title = title[5:].split(ffr, 1)[0]
return title
def downloadVideo(f, resp):
totalSize = int(resp.info().getheader('Content-Length').strip())
currentSize = 0
CHUNK_SIZE = 32768
while True:
data = resp.read(CHUNK_SIZE)
if not data:
break
currentSize += len(data)
f.write(data)
print('============> ' + \
str(round(float(currentSize*100)/totalSize, 2)) + \
'% of ' + str(totalSize) + ' bytes')
if currentSize >= totalSize:
break
return
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python youtap.py \"<youtube-url>\"")
exit(1)
urlname = sys.argv[1].split('&', 1)[0]
print('Downloading: ' + urlname)
try:
resp = urllib2.urlopen(urlname)
except urllib2.HTTPError:
print('Bad URL: 404')
exit(1)
content = resp.read()
videoUrl = getVideoUrl(content)
if not videoUrl:
print('Video URL cannot be found')
exit(1)
title = getTitle(content)
filename = title + '.flv'
print('Creating file: ' + filename)
f = open(filename, 'wb')
print('Download begins...')
## Download video
video = urllib2.urlopen(videoUrl)
downloadVideo(f, video)
f.flush()
f.close()
exit(0)
| [
"[email protected]"
] | |
a3dcc90e068ad557401a75428293332468a08fb3 | ebff4045910369fafd56ad369f2de0da992dd221 | /button.py | 4d8510cc5a5355da8a451c9d60a87dfe6a408fa8 | [] | no_license | galaxy-dust/alien_invasion | 0d654e33af3fc4cd49cddcf7dc3faf2d959bf8fb | 206141b0fde7ee30bed41cc6850ec2b2962137b0 | refs/heads/master | 2020-04-10T00:29:37.037805 | 2018-12-06T14:32:04 | 2018-12-06T14:32:04 | 160,686,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | import pygame.font
import pygame
class Button():
def __init__(self,ai_settings, screen, msg):
self.ai_settings = ai_settings
self.screen = screen
self.screen_rect = self.screen.get_rect()
self.width, self.height = 200, 50
self.button_color = (0,255,0)
self.text_color =(255,255,255)
self.font = pygame.font.SysFont(None, 48)
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
self.prep_msg(msg)
def prep_msg(self,msg):
self.msg_image = self.font.render(msg, True, self.text_color,
self.button_color)
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.screen_rect.center
def draw_button(self):
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.msg_image, self.msg_image_rect)
| [
"[email protected]"
] | |
16a366d7fe0e7f788721b4bb5f0dad1de7ce646f | 77929020bd02471b46e5627de621e13870876216 | /weather/urls.py | 8dbbf64b32fbd85072cbfb07c8e9ca9d83869b31 | [] | no_license | Hiwa2719/weather | 0c8b593ce8935b938e72616d41867bd75fd0acab | b16b9c7cf90dda43d8d6e34c8f7d8200b7454c37 | refs/heads/main | 2023-04-14T23:01:16.794996 | 2021-04-23T21:36:30 | 2021-04-23T21:36:30 | 348,776,625 | 0 | 0 | null | 2021-04-23T18:23:10 | 2021-03-17T16:23:58 | SCSS | UTF-8 | Python | false | false | 366 | py | from django.urls import path
from . import views
app_name = 'weather'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('new-list/', views.NewList.as_view(), name='new_list'),
path('remove-city/', views.RemoveCity.as_view(), name='remove_city'),
path('model-detail/', views.GetModalDetail.as_view(), name='modal-detail')
]
| [
"[email protected]"
] | |
e94a700f968a82baaa9b58e135e2d927dc8dca63 | 43e79af67945b3bf580d294d09da63d0379b4ae0 | /script/relative_kinect_publisher.py | 58d7290d3d026b3ed5f6e8d0de08c01edd44fa41 | [] | no_license | payamn/following-ahead | 3ef0caf0a50cdbec877c98051969661a42a98a90 | 25743f8683df3e96d79355422bd202a935b61c28 | refs/heads/master | 2021-09-05T17:24:30.442715 | 2017-09-20T03:28:47 | 2017-09-20T03:28:47 | 104,242,088 | 0 | 0 | null | 2017-09-20T16:44:00 | 2017-09-20T16:44:00 | null | UTF-8 | Python | false | false | 2,396 | py | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import TransformStamped
import message_filters
import tf as transform
import numpy as np
tfObj = transform.TransformerROS(True, rospy.Duration(10.0))
relativePosePub = rospy.Publisher('/person_follower/groundtruth_pose', TransformStamped, queue_size=1)
def callback(helmetMsg, kinectMsg):
helmetPose = tfObj.fromTranslationRotation(
(helmetMsg.transform.translation.x, helmetMsg.transform.translation.y, helmetMsg.transform.translation.z),
(helmetMsg.transform.rotation.x, helmetMsg.transform.rotation.y, helmetMsg.transform.rotation.z, helmetMsg.transform.rotation.w)
)
kinectPose = tfObj.fromTranslationRotation(
(kinectMsg.transform.translation.x, kinectMsg.transform.translation.y, kinectMsg.transform.translation.z),
(kinectMsg.transform.rotation.x, kinectMsg.transform.rotation.y, kinectMsg.transform.rotation.z, kinectMsg.transform.rotation.w)
)
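    # helmetPose and kinectPose are both poses expressed in the vicon/world frame;
    # inv(T_world_kinect) * T_world_helmet below gives the helmet pose relative to the kinect frame.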
relativePose = np.dot(np.linalg.inv(kinectPose), helmetPose)
relativeQuaternion = transform.transformations.quaternion_from_matrix(relativePose)
relativePoseMsg = TransformStamped()
relativePoseMsg.header.frame_id = "world"
relativePoseMsg.child_frame_id = "person_follower/helmet_relative"
relativePoseMsg.transform.translation.x = relativePose[0, 3]
relativePoseMsg.transform.translation.y = relativePose[1, 3]
relativePoseMsg.transform.translation.z = relativePose[2, 3]
relativePoseMsg.transform.rotation.x = relativeQuaternion[0]
relativePoseMsg.transform.rotation.y = relativeQuaternion[1]
relativePoseMsg.transform.rotation.z = relativeQuaternion[2]
relativePoseMsg.transform.rotation.w = relativeQuaternion[3]
relativePosePub.publish(relativePoseMsg)
br = transform.TransformBroadcaster()
br.sendTransform(
(relativePose[0, 3], relativePose[1, 3], relativePose[2, 3]),
relativeQuaternion,
rospy.Time.now(),
relativePoseMsg.child_frame_id,
"world"
)
if __name__ == '__main__':
rospy.init_node('node_name')
helmetSub = message_filters.Subscriber('/vicon/helmet/helmet', TransformStamped)
kinectSub = message_filters.Subscriber('/vicon/husky_follower/husky_follower', TransformStamped)
ts = message_filters.TimeSynchronizer([helmetSub, kinectSub], 10)
ts.registerCallback(callback)
rospy.spin() | [
"[email protected]"
] | |
187962dd8c17294e9070e38afbd203c6d97037a2 | 1258e001beb0c5aabb866a170625698e9385d3e9 | /Alura/diff21.py | 1295c6d8cd97d286c3f377611f0ca9d13ae814b9 | [] | no_license | mgarcia86/python | 7b3d450d7a0cb5623af446074784f247eb113497 | 66dd2e0dd6000a6d500dca594b23b54acae88169 | refs/heads/master | 2020-03-16T19:46:28.420744 | 2018-05-10T19:32:28 | 2018-05-10T19:32:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | def diff21():
if n > 21:
return 2 * (abs(21 - n))
elif n <= 21:
return abs(21 - n)
def sleep(week, holiday):
if not week:
return True
elif week and not holiday:
return False
else:
return True
def nao_string(str):
if len(str) >= 3 and str[:3] == 'not':
return str
return 'not ' + str
def monkey_trouble(a_smile, b_smile):
if a_smile and b_smile:
return True
elif not a_smile and not b_smile:
return True
else:
return False
def near_hundred(n):
    if abs(n - 100) <= 10 or abs(n - 200) <= 10:
return True
else:
return False | [
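# Minimal usage checks (expected values follow the usual CodingBat-style specs
# these exercises appear to be based on):
if __name__ == '__main__':
    assert diff21(19) == 2
    assert diff21(25) == 8              # over 21, so the difference is doubled
    assert sleep(False, False) is True  # not a weekday -> free to sleep in
    assert nao_string('candy') == 'not candy'
    assert nao_string('not bad') == 'not bad'
    assert monkey_trouble(True, True) is True
    assert near_hundred(93) is True
    assert near_hundred(145) is False
    print('all checks passed')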
"[email protected]"
] | |
15a4d561dfb7b03334bf6343e7c40bcd382ae8a9 | a7893ea023f4839fa0447845b88fbf2536966ba0 | /FASRGAN_and_Fs-SRGAN/codes/models/__init__.py | 035089f9d36bccbf24381558a598506920d7475d | [] | no_license | Rainyfish/FASRGAN-and-Fs-SRGAN | c84f6e0220d48271799b521237b937c9383e6471 | 2d85fc0bb5cb65d4ef4101c1953f0322fd3effbf | refs/heads/master | 2021-06-21T23:53:35.992689 | 2021-03-14T10:22:56 | 2021-03-14T10:22:56 | 201,067,476 | 24 | 6 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | import logging
logger = logging.getLogger('base')
def create_model(opt):
model = opt['model']
if model == 'sr':
from .SR_model import SRModel as M
elif model == 'FsSRModel':
from .FsSR_model import FsSRModel as M
elif model == 'srgan':
from .SRGAN_model import SRGANModel as M
elif model == 'srragan':
from .SRRaGAN_model import SRRaGANModel as M
elif model == 'sftgan':
from .SFTGAN_ACD_model import SFTGAN_ACD_Model as M
elif model =='FASRGAN': #Fine-grained attention SRGAN
from .FASRGAN_model import FASRGANModel as M
elif model =='FsSRGAN': #feature-sharing SRGAN
from .FsSRGAN_model import FsSRGANModel as M
elif model == 'FAFS_SRGAN':
from .FAFS_SRGAN_model import FAFS_SRGANModel as M
else:
raise NotImplementedError('Model [{:s}] not recognized.'.format(model))
m = M(opt)
logger.info('Model [{:s}] is created.'.format(m.__class__.__name__))
return m
| [
"[email protected]"
] | |
0b04dcafbb7bafeed73250d6bc7254c906731ccc | 7486b3af4d4413a96b3e0bf76f776cd8605d7c05 | /koalakid1/Divide Conquer/bj-2630.py | bdd6711efba8a0bed6260a699fb4fb5d2babdeb5 | [] | no_license | WonyJeong/algorithm-study | 7146e18ec9a3d7f46910e31890768b2e37f8b9b4 | dd659bf75c902800bed226d392d144b691d8e059 | refs/heads/main | 2023-03-31T14:38:47.365622 | 2021-04-02T01:35:36 | 2021-04-02T01:35:36 | 334,309,434 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import sys
def quadtree(n, x, y):
global graph, blue, white
point = graph[x][y]
for i in range(x, x + n):
for j in range(y, y + n):
if point != graph[i][j]:
quadtree(n//2, x, y)
quadtree(n//2, x, y+n//2)
quadtree(n//2, x+n//2, y)
quadtree(n//2, x+n//2, y+n//2)
return
if point:
blue += 1
return
else:
white += 1
return
if __name__ == "__main__":
input = sys.stdin.readline
n = int(input())
graph = [list(map(int, input().strip().split())) for _ in range(n)]
blue, white = 0, 0
quadtree(n, 0, 0)
print(white)
print(blue)
| [
"[email protected]"
] | |
cbf79af35fb2759d0e7ec9a19ca7f7380778de56 | 02d003627dc39487c43827de23f8a26faa616574 | /ds/array/contains_dup.py | b279e96b09329f845a71fa8705cba64e06c63801 | [] | no_license | amitt001/cs-review | 63d31739d70c24eb1204537d2e37b4bbfb195e36 | f04987b32f2598e6f058edd44c2e586894dab704 | refs/heads/master | 2020-05-24T23:32:50.234181 | 2019-09-03T14:26:18 | 2019-09-03T14:26:18 | 187,516,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | """Given an array of integers, find if the array contains any duplicates.
Your function should return true if any value appears at least twice in the array, and it should return false if every element is distinct.
Input: [1,2,3,1]
Output: true
Input: [1,2,3,4]
Output: false
"""
def containsDuplicate(nums) -> bool:
s = set()
for i in nums:
if i in s:
return True
s.add(i)
return False
def sol_const_space(nums) -> bool:
if not nums:
        return False
nums.sort()
last_val = nums[0]
for i in nums[1:]:
if i == last_val:
return True
last_val = i
return False
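# containsDuplicate: O(n) time, O(n) extra space (hash set).
# sol_const_space: O(n log n) time, O(1) extra space, but sorts (mutates) the input list.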
if __name__ == '__main__':
arr = [1, 2, 3, 1]
assert containsDuplicate(arr) is True
assert sol_const_space(arr) is True
arr = [1, 2, 3, 4]
assert containsDuplicate(arr) is False
assert sol_const_space(arr) is False
| [
"[email protected]"
] | |
8b4a671e8f15ec14fb5e1bf7b5b95915b9486d2a | c10d080c40161633a922b1274b92d3048555ea5b | /src/interaction.py | 5d3b9721a9476bcf679456078f04c14a32d3b311 | [
"MIT"
] | permissive | leolani/leolani-datarepresentation | 3c784bb8401c3b67efe2b80ac4a34498f0f9e827 | bc2975310fe623f7548db54bf5d691c7bbcf0c1e | refs/heads/main | 2023-01-06T07:52:45.733312 | 2020-10-23T09:10:29 | 2020-10-23T09:10:29 | 306,548,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,604 | py | import os
import re
import platform
import requests
import pycountry
import subprocess
from time import time
from datetime import datetime
from random import getrandbits
from typing import List, Iterable, Dict, Tuple, Optional
from vision import Object, Face, Observations
from language import Chat
class Context(object):
"""
Context Object
Contains Awareness for People, Objects & Conversations
"""
OBSERVATION_TIMEOUT = 60
_people = None # type: Dict[str, Tuple[Face, float]]
_objects = None # type: Observations
def __init__(self, name, friends):
# type: (str, Iterable[str]) -> None
self._id = getrandbits(128)
self._name = name
self._friends = friends
self._chats = []
self._chat_start = None
self._chatting = False
self._people = {}
self._current_people = []
self._objects = Observations()
self._intention = None
self._location = Location()
@property
def id(self):
# type: () -> int
"""
ID
Returns
-------
id: int
"""
return self._id
@property
def own_name(self):
# type: () -> str
"""
Returns
-------
str
The robot's own name
"""
return self._name
@property
def chats(self):
# type: () -> List[Chat]
"""
Returns
-------
chats: list of Chat
List of all Chats that were held during current session
"""
return self._chats
@property
def chatting(self):
# type: () -> bool
"""
Returns True when a Chat is happening
Returns
-------
chatting: bool
"""
return self._chatting
@property
def chat(self):
# type: () -> Optional[Chat]
"""
The Current Chat, if any
Returns
-------
chat: Optional[Chat]
"""
return self.chats[-1] if self.chatting else None
@property
def datetime(self): # When
# type: () -> datetime
"""
The Current Date & Time
Returns
-------
datetime: datetime
Current Date and Time
"""
return datetime.now()
@property
def location(self): # Where
# type: () -> Location
"""
The Current Location
Returns
-------
location: Location
Current Location
"""
return self._location
@property
def people(self): # Who
# type: () -> List[Face]
"""
People seen within Observation Timeout
Returns
-------
people: list of Face
List of People seen within Observation Timeout
"""
current_time = time()
return [person for person, t in self._people.values() if (current_time - t) < Context.OBSERVATION_TIMEOUT]
@property
def friends(self):
# type: () -> List[str]
"""
Names of all friends.
Returns
-------
List[str]
List of all friends names
"""
return self._friends
def current_people(self, in_chat=False, timeout=OBSERVATION_TIMEOUT):
# type: () -> List[Face]
"""
People seen currently in the Context
Returns
-------
people: list of Face
List of all People seen currently in the Context
"""
if in_chat and not self.chatting:
return []
current_time = time()
return [person for person, t in self._people.values()
if current_time - t <= timeout and (not in_chat or t >= self._chat_start)]
@property
def all_people(self):
# type: () -> List[Face]
"""
People seen since beginning of Context
Returns
-------
people: list of Face
List of all People seen since beginning of Context
"""
return [person for person, t in self._people.values()]
@property
def objects(self): # What
# type: () -> List[Object]
"""
Objects seen within Observation Timeout
Returns
-------
objects: list of Object
List of Objects seen within Observation Timeout
"""
return self._objects.instances
@property
def all_objects(self):
# type: () -> List[Object]
"""
Objects seen since beginning of Context
Returns
-------
objects: list of Object
List of all Objects since beginning of Context
"""
return self._objects.instances
def add_objects(self, objects):
# type: (List[Object]) -> None
"""
Add Object Observations to Context
Parameters
----------
objects: list of Object
List of Objects
"""
if objects:
self._objects.add_observations(objects[0].image, objects)
def add_people(self, people):
# type: (Iterable[Face]) -> None
"""
Add People Observations to Context
Parameters
----------
people: list of Face
List of People
"""
for person in people:
self._people[person.name] = (person, time())
def start_chat(self, speaker):
# type: (str) -> None
"""
Start Chat with Speaker
Parameters
----------
speaker: str
Name of Speaker
"""
self._chat_start = time()
self._chatting = True
self._chats.append(Chat(speaker, self))
def stop_chat(self):
# type: () -> None
"""Stop Chat"""
self._chat_start = None
self._chatting = False
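# Minimal usage sketch for the Context class above (illustrative only; `face`
# stands for a hypothetical vision.Face observation from the perception pipeline):
#
#   context = Context("robot", friends=["Alice", "Bob"])
#   context.add_people([face])
#   context.start_chat(face.name)
#   people = context.current_people(in_chat=True)
#   context.stop_chat()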
class Location(object):
"""Location on Earth"""
UNKNOWN = "Unknown"
def __init__(self):
# TODO use UUIDs
self._id = getrandbits(128)
self._label = self.UNKNOWN
try:
loc = requests.get("https://ipinfo.io").json()
self._country = pycountry.countries.get(alpha_2=loc['country']).name
self._region = loc['region']
self._city = loc['city']
except:
self._country = self.UNKNOWN
self._region = self.UNKNOWN
self._city = self.UNKNOWN
@property
def id(self):
# type: () -> int
"""
ID for this Location object
Returns
-------
id: int
"""
return self._id
@property
def country(self):
# type: () -> str
"""
Country String
Returns
-------
country: str
"""
return self._country
@property
def region(self):
# type: () -> str
"""
Region String
Returns
-------
region: str
"""
return self._region
@property
def city(self):
# type: () -> str
"""
City String
Returns
-------
city: str
"""
return self._city
@property
def label(self):
# type: () -> str
"""
Learned Location Label
Returns
-------
label: str
"""
return self._label
@label.setter
def label(self, value):
# type: (str) -> None
"""
Learned Location Label
Parameters
----------
value: str
"""
self._label = value
@staticmethod
def _get_lat_lon():
# type: () -> Optional[Tuple[float, float]]
"""
Get Latitude & Longitude from GPS
Returns
-------
latlon: Optional[Tuple[float, float]]
GPS Latitude & Longitude
"""
try:
if platform.system() == "Darwin":
# Use WhereAmI tool by Rob Mathers -> https://github.com/robmathers/WhereAmI
whereami = os.path.join(os.path.dirname(__file__), 'util', 'whereami')
regex = "Latitude: (.+?)\nLongitude: (.+?)\n"
return tuple(float(coord) for coord in re.findall(regex, subprocess.check_output(whereami))[0])
else:
raise Exception()
except: # TODO: Add Support for (at least) Windows
print("Couldn't get GPS Coordinates")
return None
def __repr__(self):
return "{}({}, {}, {})".format(self.__class__.__name__, self.city, self.region, self.country)
| [
"[email protected]"
] | |
31a0c3c321b124e25d22c7584aa8ccbc4ed0ae04 | c7a6f8ed434c86b4cdae9c6144b9dd557e594f78 | /ECE364/.PyCharm40/system/python_stubs/348993582/PyQt4/QtNetwork/__init__.py | 499f277c4fd5601ad24160f4fb960e5e5fc2f65f | [] | no_license | ArbalestV/Purdue-Coursework | 75d979bbe72106975812b1d46b7d854e16e8e15e | ee7f86145edb41c17aefcd442fa42353a9e1b5d1 | refs/heads/master | 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | # encoding: utf-8
# module PyQt4.QtNetwork
# from /usr/lib64/python2.6/site-packages/PyQt4/QtNetwork.so
# by generator 1.136
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
# no functions
# classes
from QAbstractNetworkCache import QAbstractNetworkCache
from QAbstractSocket import QAbstractSocket
from QAuthenticator import QAuthenticator
from QFtp import QFtp
from QHostAddress import QHostAddress
from QHostInfo import QHostInfo
from QHttp import QHttp
from QHttpHeader import QHttpHeader
from QHttpRequestHeader import QHttpRequestHeader
from QHttpResponseHeader import QHttpResponseHeader
from QLocalServer import QLocalServer
from QLocalSocket import QLocalSocket
from QNetworkAccessManager import QNetworkAccessManager
from QNetworkAddressEntry import QNetworkAddressEntry
from QNetworkCacheMetaData import QNetworkCacheMetaData
from QNetworkCookie import QNetworkCookie
from QNetworkCookieJar import QNetworkCookieJar
from QNetworkDiskCache import QNetworkDiskCache
from QNetworkInterface import QNetworkInterface
from QNetworkProxy import QNetworkProxy
from QNetworkProxyFactory import QNetworkProxyFactory
from QNetworkProxyQuery import QNetworkProxyQuery
from QNetworkReply import QNetworkReply
from QNetworkRequest import QNetworkRequest
from QSsl import QSsl
from QSslCertificate import QSslCertificate
from QSslCipher import QSslCipher
from QSslConfiguration import QSslConfiguration
from QSslError import QSslError
from QSslKey import QSslKey
from QTcpSocket import QTcpSocket
from QSslSocket import QSslSocket
from QTcpServer import QTcpServer
from QUdpSocket import QUdpSocket
from QUrlInfo import QUrlInfo
| [
"[email protected]"
] | |
fc7d7b27a526a43db9c9b511ae29a4442acf81d4 | 0fb2e09c0629cf47045881d7eecc125f674230e5 | /pps_webapp/main/views.py | bf6d96c2869c358792ae6771da7c09201b547904 | [] | no_license | satwik77/phenopacket-scraper-webapp | ea24ad2cc2fbd988e12df1178be5ba940c8a9859 | 4382c2a4e501448e7bfd68c7826a3c4c5ab39a26 | refs/heads/master | 2021-01-17T09:33:07.188192 | 2016-08-23T17:24:20 | 2016-08-23T17:24:20 | 61,695,575 | 0 | 0 | null | 2016-06-22T06:45:50 | 2016-06-22T06:45:50 | null | UTF-8 | Python | false | false | 1,984 | py | from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.shortcuts import render
from django.contrib import auth
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import requests
import pprint
api_url= 'http://localhost:8001/api/'
@csrf_exempt
def home(request):
if request.POST:
choice = str(request.POST['choice'])
url = str(request.POST['url'])
data = ""
if choice == '1':
get_data={'url' : str(url)}
response = requests.get(api_url+ 'scrape', params = get_data)
if response.status_code == 200:
response_data = response.json()
abstract = response_data['Abstract']
title = str(response_data['Title'])
hpo_terms = response_data['HPO Terms']
data+= "Title:\n" + title + "\n"
data+="Abstract:\n" + abstract + "\n"
data+="HPO Terms:\n"
for term in hpo_terms:
data += str(term) + "\n"
if choice == '2':
get_data={'url' : str(url)}
response = requests.get(api_url+ 'annotate', params = get_data)
if response.status_code == 200:
response_data = response.json()
data = {}
data["annotated_terms"] = response_data['Annotated HPO Terms']
data["annotated_abstract"] = response_data['Annotated Abstract']
data= pprint.pformat(data, indent=4)
if choice == '3':
get_data={'url' : str(url)}
response = requests.get(api_url+ 'phenopacket', params = get_data)
if response.status_code == 200:
response_data = response.json()
phenopacket = response_data['phenopacket']
data = phenopacket
return HttpResponse(data)
return render(request, 'main/index.html')
| [
"[email protected]"
] | |
d6722ce189fbb5948c8db922d888a2d1878b4e68 | 6fa437f198e74ebac1446999303bd34a04378e3f | /wat/wat.py | 3cffd9f75ebfc222306612cb35ee58220fed0d5e | [] | no_license | 0f11/Python | 27f718e38cf40d2a8a348a9134660b619f4f6f20 | 7ba7941343560604539c2ad61b94afd660f8279d | refs/heads/master | 2020-07-11T01:43:04.402080 | 2019-08-26T07:32:50 | 2019-08-26T07:32:50 | 204,421,162 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | """Wat."""
def first(n: int):
"""
First.
:param n:
:return:
"""
if n == 1:
return 0
if n == 2:
return 2
if n == 4:
return 4
if n == 3:
return 6
if n == 713:
return 8
if n == -145:
return 11
if n == -789:
return 12
if n == -1:
return 14
if n == 0:
return 16
if n == 2138:
return 18
| [
"Welcomettu13"
] | Welcomettu13 |
c245e365442238e1f481ff741ef8a55ec08242bd | af5184f69ef76b40a27a220cd6b9085978e6ade4 | /handlers/ui_ibon.py | 3824f5dd68ef1c2f1d6f421d3dfb44a822863395 | [] | no_license | igoiko-ii40project/web_remoht | f0a71a2954083367956edcad3e58c08420ab19f2 | 4e63c897d357e71686d449796b224c33ea5dec54 | refs/heads/master | 2020-09-15T23:16:06.365373 | 2016-09-13T08:09:12 | 2016-09-13T08:09:12 | 67,804,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,735 | py | __author__ = '105031691'
import os
import logging
import webapp2
import jinja2
from google.appengine.api import xmpp, taskqueue, users
from google.appengine.ext import ndb
import datetime
import json
import model
from . import BaseHandler
COUNTER_PAGE_HTML = """\
<!DOCTYPE html>
<html>
<body>
<form action="#" method="POST">
<label for="key">Key:</label><input type="text" name="key" id="key">
<input type="submit" value="+1">
</form>
</body>
</html>
"""
# -----------------------------------------------------------------------------------
class Counter(ndb.Model):
count = ndb.IntegerProperty(indexed=False)
# -----------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
class CounterHandler(webapp2.RequestHandler):
def get(self):
try:
self.response.write(COUNTER_PAGE_HTML)
logging.info("CounterHandler GET: page counter shown")
except Exception as ex:
logging.warn("Error in GET counterhandler")
def post(self):
try:
key = self.request.get('key')
logging.debug("button clicked: key: %s",key)
# Add the task to the default queue.
taskqueue.add(url='/poll_worker', queue_name='default', params={'key': key})
except Exception as ex:
logging.warn("Error in POST counterhandler")
self.redirect('/')
# -----------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
class PollWorker(webapp2.RequestHandler):
def post(self): # should run at most 1/s due to entity group limit
key = self.request.get('key')
logging.debug("***************** Log entered as a result of a qued task: key was %s ************************", key)
if True:
# request readings ....
jid_to_poll = self.request.get('jid')
when_to_execute = self.request.get('eta')
this_is_datetime=datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S")
msg = { "cmd" : "get_readings"}
logging.debug(this_is_datetime+": Sending request for get_readings as part of PollWorker to %s",jid_to_poll)
logging.debug(this_is_datetime+": %s", msg)
xmpp.send_message(jid_to_poll, json.dumps(msg))
# request relays....
msg = { "cmd" : "get_relays"}
logging.debug(this_is_datetime+": Sending request for get_relays as part of PollWorker to %s",jid_to_poll)
logging.debug(this_is_datetime+": %s", msg)
xmpp.send_message(jid_to_poll, json.dumps(msg))
@ndb.transactional
def update_counter():
counter = Counter.get_or_insert(key, count=0)
counter.count += 1
counter.put()
update_counter()
# -----------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
class FunsHandler(BaseHandler): # i enable a page to manage different action requests (funs) coming from UI javascript
def post(self, device_id, fun=None):
user = users.get_current_user()
if not user: return self.unauthorized()
device = model.Device.get_by_id(int(device_id))
if device is None: return self.notfound()
logging.info("Device ID: %d, %s, Fun: %s", int(device_id),device.full_jid,fun)
# fun = self.request.get('fun') # i pass "fun" in the "POST", not in the "request"
if fun=='1':
fun_code="get_code"
elif fun=='2':
fun_code="get_cred"
elif fun=='3':
fun_code="request_image"
else:
fun_code=''
logging.debug("ERROR: FunsHandler Received unknown code: %s",fun)
if fun_code == 'request_image':
# request image ....
this_is_datetime=datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S")
msg = { "cmd" : "get_image"}
logging.debug(this_is_datetime+": Sending request for get_image to %s",device.full_jid)
xmpp.send_message(device.full_jid, json.dumps(msg))
pass
elif fun_code=='beep':
pass
elif fun_code=='get_cred':
# get new credentials from propietary bucket....
this_is_datetime=datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S")
msg = { "cmd" : "get_cred"}
logging.debug(this_is_datetime+": Sending request for get_cred to %s",device.full_jid)
xmpp.send_message(device.full_jid, json.dumps(msg))
pass
elif fun_code=='get_code':
# get new code from propietary bucket....
this_is_datetime=datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S")
msg = { "cmd" : "get_code"}
logging.debug(this_is_datetime+": Sending request for get_cred to %s",device.full_jid)
xmpp.send_message(device.full_jid, json.dumps(msg))
pass
else:
logging.debug("ERROR: Received request to perform unknown function: %s",fun)
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
class FunsButtonsHandler(BaseHandler):
def post(self, device_id, fun):
user = users.get_current_user()
if not user: return self.unauthorized()
device = model.Device.get_by_id(int(device_id))
if device is None: return self.notfound()
logging.info("Device ID: %d, %s", int(device_id),device.full_jid)
logging.debug("function %s to %s", fun, device_id)
msg = { "cmd" : "fun",
"params" : { "param1" : 0,
"param2" : 1 } }
to_jid = device.full_jid
logging.debug(datetime.datetime.now().strftime("%H:%M:%S")+": Sending toggle_relay to %s, %s=%s", to_jid, relay, state)
# xmpp.send_message( to_jid, json.dumps(msg))#, from_jid=from_jid)
# self.render_json({"msg":"OK","relays":device.relays})
| [
"[email protected]"
] | |
3bae786e9e40c36b6f97504686557294fa9bf5c6 | 695a2237c52485a01086e690a633f4fd43e3c128 | /COVID-away_one-class_classification_models/Acc+Gyro+Mbar+RVect/Local_Outlier_Factor.py | 5608c38c7b291f332df1ef6940f08aed4ae2db11 | [] | no_license | wbing520/COVID-away | db3c3dc10d4b56036a30985a02e93d3b7f0338e3 | fea67b4d1e7232e0f16b5bbd886788fe7a676771 | refs/heads/master | 2022-12-23T14:43:57.455443 | 2020-10-07T01:53:19 | 2020-10-07T01:53:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # local outlier factor for imbalanced classification
import pandas as pd
from sklearn.metrics import precision_recall_fscore_support, classification_report,confusion_matrix, precision_recall_curve
import pickle
test = pd.read_csv('./Test.csv')
testX = test.drop(['label','Pattern'], axis = 1)
testy = test.label
filename = 'lof_model.sav'
loaded_lof = pickle.load(open(filename, 'rb'))
yhat = loaded_lof.fit_predict(testX)
testy[testy == 1] = -1
testy[testy == 0] = 1
print(classification_report(testy, yhat)) | [
"[email protected]"
] | |
f819a33c29a02bdfd3bd03dc17a1abc83f6968ad | 67c8fbadf97cad08d5dc03100863a7e4cfb67bd4 | /api/serializers.py | 5907ce214f03dba88de81b96f3d340c91d23dac6 | [] | no_license | SavenkovAlx/public-chat-API | 4fb3c388992f57991e52da936086a4323b162f6f | 3c9b33f447f40ee43e6b833638c83b603789585f | refs/heads/main | 2023-03-31T05:27:56.899064 | 2021-03-28T13:53:24 | 2021-03-28T13:53:24 | 352,340,422 | 0 | 0 | null | 2021-03-28T13:53:24 | 2021-03-28T13:44:20 | Python | UTF-8 | Python | false | false | 797 | py | import re
from rest_framework import serializers
from .models import Message
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = ['id', 'email', 'text', 'created_date', 'update_date']
def validate_email(self, email):
norm_email = email.lower()
regex = '^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$'
if not re.search(regex, norm_email):
raise serializers.ValidationError('Enter a valid Email')
return norm_email
def validate_text(self, text):
regex = '^(?!\s*$).{,99}$'
if not re.search(regex, text):
raise serializers.ValidationError(
                'The message must not be an empty string and must not be longer than 100 characters')
return text
| [
"[email protected]"
] | |
ddb174f793b28d3d67bd47fc709bbd1e25118f3d | a671a684173ba8128f43d075d6b070c04689a0f7 | /myS2SVAE/Vocabulary.py | a4c5c515eec4cff42bb9365bff21e71cf52983f6 | [] | no_license | yeyeye529/my-vae | 160a983a8f6249fa989b13b3ed6ae9f493a11051 | 8809ebd603f6dc8dc26759fa2cb457313ac5ab10 | refs/heads/master | 2020-03-29T22:05:01.785176 | 2018-10-23T07:41:33 | 2018-10-23T07:41:33 | 150,402,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,362 | py | from collections import defaultdict
class vocabulary(object):
def __init__(self, max_vocab_size = 1000000):
self._vocab = None
self._word2idx = None
self.bos_id = 0
self.eos_id = 1
self.unk_id = 2
self.pad_id = 3
self._max_vocab_size = max_vocab_size
pass
@property
def vocab(self):
        assert self._vocab is not None, 'Vocabulary does not exist'
        return self._vocab
@property
def word2idx(self):
        assert self._word2idx is not None, 'Vocabulary does not exist'
        return self._word2idx
def build_vocab(self, sentence_list, remove_low_freq = 0):
vocab = []
vocab_with_cnt = defaultdict(int)
for s in sentence_list:
for w in s:
vocab_with_cnt[w] += 1
print("Original vocab size = ", len(vocab_with_cnt))
i = 0
for w, cnt in vocab_with_cnt.items():
if cnt > remove_low_freq:
vocab.append(w)
i += 1
if i > self._max_vocab_size:
break
print("Now vocab size = ", len(vocab))
vocab.sort()
vocab_new = ['<s>', '</s>', '<unk>', '<pad>'] # <s>: begin of sentence; </s>: end of sentence; <unk>: unknown word
vocab_new += vocab
word2idx = {w:i for i, w in enumerate(vocab_new)}
self._vocab = vocab_new
self._word2idx = word2idx
def vocab_sent(self, sent_list, max_str_len = None, truncated = False, truncated_length = 0):
sent_list_new = list()
sent_lengths = []
if max_str_len == None:
max_str_len = 0
for i in sent_list:
if len(i) > max_str_len:
max_str_len = len(i)
max_str_len += 2 # + <bos>, <eos>
if truncated:
max_str_len = truncated_length + 2 # + <bos>, <eos>
for s in sent_list:
sent_new = list()
sent_new.append(self.bos_id)
for w in s:
if w in self._word2idx:
sent_new.append(self._word2idx[w])
else:
sent_new.append(self.unk_id) # <unk>
sent_new.append(self.eos_id)
sent_lengths.append(min(len(sent_new), max_str_len))
#Padding:
if (len(sent_new) < max_str_len):
sent_new.extend([self.pad_id]*(max_str_len - len(sent_new)))
else:
sent_new = sent_new[0:max_str_len]
sent_list_new.append(sent_new)
return sent_list_new, sent_lengths, max_str_len
def vocab_sent_for_multi_refs(self, sent_list, max_str_len = None, truncated = False, truncated_length = 0):
sent_list_new = list()
sent_lengths = []
if max_str_len == None:
max_str_len = 0
for refs in sent_list:
for i in refs:
if len(i) > max_str_len:
max_str_len = len(i)
max_str_len += 2 # + <bos>, <eos>
if truncated:
max_str_len = truncated_length + 2 # + <bos>, <eos>
for refs in sent_list:
refs_new = list()
refs_lengths = list()
for s in refs:
sent_new = list()
sent_new.append(self.bos_id)
for w in s:
if w in self._word2idx:
sent_new.append(self._word2idx[w])
else:
sent_new.append(self.unk_id) # <unk>
sent_new.append(self.eos_id)
refs_lengths.append(min(len(sent_new), max_str_len)) # sent_lengths
#Padding:
if (len(sent_new) < max_str_len):
sent_new.extend([self.pad_id]*(max_str_len - len(sent_new)))
else:
sent_new = sent_new[0:max_str_len]
refs_new.append(sent_new) # sent_list_new
sent_list_new.append(refs_new)
sent_lengths.append(refs_lengths)
return sent_list_new, sent_lengths, max_str_len
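# Minimal usage sketch (toy data):
if __name__ == '__main__':
    sentences = [['the', 'cat', 'sat'], ['the', 'dog', 'sat', 'down']]
    voc = vocabulary()
    voc.build_vocab(sentences)
    ids, lengths, max_len = voc.vocab_sent(sentences)
    print(voc.vocab[:4])        # ['<s>', '</s>', '<unk>', '<pad>']
    print(ids[0], lengths[0], max_len)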
| [
"[email protected]"
] | |
406ba475df4e4247276045d3a8377eed80d0612b | 116763c900b41ff793c6621edeb196ccfafda403 | /scripts/analyses/archive/penalized_regression/penal_regresFC_Age_TMDif_Gaus.py | 6a7081400f43036e2ac33d7f730a639cb93d57fe | [] | no_license | weiwei-wch/multiscale | b29d1ef24013c4756c019b6f5d884bfe0c7708ec | 9f7cbc021a47c4acb87afa85b3654093b7ef984a | refs/heads/master | 2023-04-26T04:38:33.998724 | 2021-05-17T21:25:56 | 2021-05-17T21:25:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,317 | py | ## imports ##
import scipy
import scipy.io as sio
from os.path import dirname, join as pjoin
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import h5py
import pandas as pd
import sys
### paramters that apply across all loop iterations
# set alphas for gcv
# use this alpha range
alphas = np.exp2(np.arange(16) - 10)
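# i.e. 16 log-spaced values from 2**-10 (~0.001) up to 2**5 (= 32)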
# vector for variable edge numbers
edgeRange = range(30,1000,30)
# arrays for storing iterative lm features
SR_preds=np.empty([100,len(edgeRange)])
LR_preds=np.empty([100,len(edgeRange)])
AR_preds=np.empty([100,len(edgeRange)])
SR_alphas=np.empty([100,len(edgeRange)])
LR_alphas=np.empty([100,len(edgeRange)])
AR_alphas=np.empty([100,len(edgeRange)])
# for all scales, and using full feature matrix as "scale 31" for convenience
for edgeNum in edgeRange:
# print index in edge range
print(edgeRange.index(edgeNum))
# Subject b.w. features
SRfilename='/cbica/projects/pinesParcels/results/EffectVecs/SR' + str(edgeNum) + '_for_bw_RRfc.csv'
LRfilename='/cbica/projects/pinesParcels/results/EffectVecs/LR' + str(edgeNum) + '_for_bw_RRfc.csv'
ARfilename='/cbica/projects/pinesParcels/results/EffectVecs/AR' + str(edgeNum) + '_for_bw_RRfc.csv'
dataSR=np.loadtxt(SRfilename,delimiter=',')
dataLR=np.loadtxt(LRfilename,delimiter=',')
dataAR=np.loadtxt(ARfilename,delimiter=',')
# Divide to predict var of int columns
SRFeatvecs=dataSR[:,2:(len(dataSR))]
LRFeatvecs=dataLR[:,2:(len(dataLR))]
ARFeatvecs=dataAR[:,2:(len(dataAR))]
# extract Age variable from 1st column
SRvarofint=dataSR[:,0]
LRvarofint=dataLR[:,0]
ARvarofint=dataAR[:,0]
for split in range(0,100):
# for a few different train and test splits
# Train and test split from data frame
SRxtrain,SRxtest,SRytrain,SRytest=train_test_split(SRFeatvecs,SRvarofint,test_size=0.33,random_state=(split))
LRxtrain,LRxtest,LRytrain,LRytest=train_test_split(LRFeatvecs,LRvarofint,test_size=0.33,random_state=(split))
ARxtrain,ARxtest,ARytrain,ARytest=train_test_split(ARFeatvecs,ARvarofint,test_size=0.33,random_state=(split))
        # fit ridge model, choosing alpha by built-in cross-validation over the alpha grid above
        # (note: fit_intercept is left at its default here)
SRlm = sklearn.linear_model.RidgeCV(alphas=alphas,store_cv_values=True).fit(SRxtrain,SRytrain)
LRlm = sklearn.linear_model.RidgeCV(alphas=alphas,store_cv_values=True).fit(LRxtrain,LRytrain)
ARlm = sklearn.linear_model.RidgeCV(alphas=alphas,store_cv_values=True).fit(ARxtrain,ARytrain)
# set prediction alpha to best performing alpha in training set
SRalpha=SRlm.alpha_
LRalpha=LRlm.alpha_
ARalpha=ARlm.alpha_
# test prediction on left out sample
SRpred_obs_r2 = sklearn.linear_model.Ridge(alpha=SRalpha).fit(SRxtrain,SRytrain).score(SRxtest,SRytest)
LRpred_obs_r2 = sklearn.linear_model.Ridge(alpha=LRalpha).fit(LRxtrain,LRytrain).score(LRxtest,LRytest)
ARpred_obs_r2 = sklearn.linear_model.Ridge(alpha=ARalpha).fit(ARxtrain,ARytrain).score(ARxtest,ARytest)
# stack the predictions vertically to be averaged across samples splits
SR_preds[split,edgeRange.index(edgeNum)]=SRpred_obs_r2
SR_alphas[split,edgeRange.index(edgeNum)]=SRalpha
LR_preds[split,edgeRange.index(edgeNum)]=LRpred_obs_r2
LR_alphas[split,edgeRange.index(edgeNum)]=LRalpha
AR_preds[split,edgeRange.index(edgeNum)]=ARpred_obs_r2
AR_alphas[split,edgeRange.index(edgeNum)]=ARalpha
SRmean_preds=np.average(SR_preds[:,edgeRange.index(edgeNum)])
SRmean_alphas=np.average(SR_alphas[:,edgeRange.index(edgeNum)])
LRmean_preds=np.average(LR_preds[:,edgeRange.index(edgeNum)])
LRmean_alphas=np.average(LR_alphas[:,edgeRange.index(edgeNum)])
ARmean_preds=np.average(AR_preds[:,edgeRange.index(edgeNum)])
ARmean_alphas=np.average(AR_alphas[:,edgeRange.index(edgeNum)])
print('Short-range out-of-samp. pred. w/ ' + str(edgeNum) + ' edges: ' + str(SRmean_preds) + ', alpha='+ str(SRmean_alphas))
print('Long-range out-of-samp. pred. w/ ' + str(edgeNum) + ' edges: ' + str(LRmean_preds) + ', alpha='+ str(LRmean_alphas))
print('All-range out-of-samp. pred. w/ ' + str(edgeNum) + ' edges: ' + str(ARmean_preds) + ', alpha='+ str(ARmean_alphas))
print("done")
| [
"[email protected]"
] | |
924ad60f641a250c2dc45b133d9e54b951e6657d | 3d42221bf6adffb35000a0cf2dd115de6ffa5796 | /clades_adapted - Kopie/Delphineae/input/01_make-revscript.py | e14988122ea32c4fa154d548b7aface5ea9816f7 | [
"MIT"
] | permissive | envima/AlpineFloraLJ | 0e021f47d0457fc018b0dad0e52b6735e2d45a90 | d3d7bbd188fbe1792382756237776cf317eabf71 | refs/heads/main | 2023-04-07T13:34:06.290176 | 2021-04-12T10:12:53 | 2021-04-12T10:12:53 | 304,222,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,623 | py | import operator
from functools import reduce
import model
pd = model.pd
areas = model.areas
ranges = model.ranges
biomes = model.biomes
states = model.states
nstates = model.nstates
# read the data csv files - the first column (taxa) is identical in each,
# so we can concatenate them
csvdata = pd.concat([pd.read_csv('range.csv', index_col=0),
pd.read_csv('biome.csv', index_col=0)[['a','A']]], axis=1)
csvdata.iloc[:,:len(areas)] *= areas.values
csvdata.iloc[:,len(areas):] *= biomes.bitvalues
data = pd.Series(
index=csvdata.index,
data=[ states.idx[x] for x in csvdata.sum(axis=1).values ])
#Below, change file name and totol species diversity accordingly.
treefile = 'Delphineae.nex'
outfilebase = 'Delphineae'
total_diversity = 824
nTimeSlices = 1000
ngen_burnin = 200
ngen_mcmc = 2000
revscript = 'Delphineae_classe.rev'
classe_state_datafile = 'Delphineae_states.csv'
data.to_csv(classe_state_datafile, index_label=None)
extinction_rate_lines = []
# extinction rates are nonzero only for single-area states
for v in model.stategraph.vs:
if v['nareas'] > 1:
line = f'extinction_rates[{v.index+1}] <- 0.0 # {v["label"]}'
else:
line = f'extinction_rates[{v.index+1}] := extinction # {v["label"]}'
extinction_rate_lines.append(line)
extinction_rate_lines = '\n'.join(extinction_rate_lines)
# populate non-zero anagenetic rates
anagenetic_rates_lines = []
for e in model.stategraph.es:
etype = e['event']
if etype == 'dispersal':
nsrc = float(len(e['src']))
line = (f'anarates[{e.source+1}][{e.target+1}] '
f':= dispersal * {nsrc} '
f'# {states[e.source].name}->{states[e.target].name}')
elif etype == 'extinction':
line = (f'anarates[{e.source+1}][{e.target+1}] '
f':= {etype} '
f'# {states[e.source].name}->{states[e.target].name}')
elif etype.startswith('biome'):
line = (f'anarates[{e.source+1}][{e.target+1}] '
':= biome_transition '
f'# {states[e.source].name}->{states[e.target].name}')
anagenetic_rates_lines.append(line)
anagenetic_rates_lines = '\n'.join(anagenetic_rates_lines)
# cladogenetic events
# for each range, enumerate the splits
clado_events_lines = []
extend = clado_events_lines.extend
clado_event_idx = 1
for i, state in enumerate(states):
# i += 1
av = list(ranges.decompose(state)) # areas in state
if len(av) == 1: # single area range
extend([f'clado_events[{clado_event_idx}] = [{i},{i},{i}] #[{states[i].name},{states[i].name},{states[i].name}]',
f'speciation_rates[{clado_event_idx}] := speciation/2'])
clado_event_idx += 1
elif len(av) > 1:
for a in av: # iterate over areas in state
# make single-area state with same biome
single = a|(state & biomes.bitmask)
j = states.idx[single]
clado_events_lines.extend([
f'clado_events[{clado_event_idx}] = [{i},{i},{j}] #[{states[i].name},{states[i].name},{states[j].name}]',
f'clado_events[{clado_event_idx+1}] = [{i},{j},{i}] #[{states[i].name},{states[j].name},{states[i].name}]',
(f'speciation_rates[{clado_event_idx}] '
':= speciation/4'),
(f'speciation_rates[{clado_event_idx+1}] '
':= speciation/4')
])
clado_event_idx += 2
k = states.idx[state-a] # index of state without area a
clado_events_lines.extend([
f'clado_events[{clado_event_idx}] = [{i},{j},{k}] #[{states[i].name},{states[j].name},{states[k].name}]',
f'clado_events[{clado_event_idx+1}] = [{i},{k},{j}] #[{states[i].name},{states[k].name},{states[j].name}]',
(f'speciation_rates[{clado_event_idx}] '
':= speciation/4'),
(f'speciation_rates[{clado_event_idx+1}] '
':= speciation/4')
])
clado_event_idx += 2
clado_events_lines = '\n'.join(clado_events_lines)
unobserved_areas = reduce(operator.ior,
[ x for x in areas if csvdata[x.name].sum()==0 ],
0)
# root state frequencies
# root_simplex_params = [0]+[1]*(nranges-1)
root_simplex_params = [0]
for i in range(1, len(states)):
s = states[i]
if s & unobserved_areas:
f = 0
else:
f = 1.0/len(list(ranges.decompose(s)))
root_simplex_params.append(f)
with open(revscript, 'w') as outf:
outf.write(eval(open('classe-template.rev').read()))
| [
"[email protected]"
] | |
acb8cafa74645f1560e286301b9c4b31274498d0 | b72e42f7f15ea8d359512cc0fe524f5407f358e5 | /CS50_web_dev/src/src8/airline1/airline/urls.py | f9e85ea4cf69f88b0dfd879f4bc3a158aa887856 | [
"MIT"
] | permissive | ChuaCheowHuan/web_app_DPTH | ec9f96d66c69ebd7e04df8d4b92578a3aaa7e392 | dd901e6359fe76f15b69701c53f76666c3219173 | refs/heads/master | 2021-06-18T11:31:10.959634 | 2020-07-23T04:04:52 | 2020-07-23T04:04:52 | 205,556,446 | 0 | 0 | MIT | 2021-06-10T21:55:14 | 2019-08-31T14:42:35 | HTML | UTF-8 | Python | false | false | 797 | py | """airline URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('flights.urls')),
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
6dad64a78b096b80c43759b584af08e34c1f1b78 | 8eeae0008711f6c62e63a599d2d7e67ba43b32ad | /tradesimpy/engines/backtest_engine_import.py | 77bfd0a3d79150cb0d5a189b1e2bc0e2cc5994e3 | [
"BSD-2-Clause"
] | permissive | shaoshoutianheyu/tradesimpy | d0db391fef0cc3471dc159da318c2de3b8bb6d31 | 5b58676e8d9ae2ce2d714413beddbc14e6154fc7 | refs/heads/master | 2020-12-13T19:13:29.582731 | 2016-08-10T19:35:27 | 2016-08-10T19:35:27 | 66,049,690 | 5 | 5 | null | 2016-08-19T03:29:06 | 2016-08-19T03:29:05 | null | UTF-8 | Python | false | false | 315 | py | import os
import sys
# Provide directory paths for necessary imports
lib_paths =\
[
os.path.abspath('../data/'),
os.path.abspath('../backtester/'),
os.path.abspath('../trading_algorithm/'),
]
for lib_path in lib_paths:
sys.path.append(lib_path)
sys.path = list(set(sys.path))
| [
"[email protected]"
] | |
700e79e4d279bb312926b55257ce6be156b4ee69 | 6bc8a3ea831b544514160f61131c5d2b6c8b4234 | /side-experiments/two_worker_classes/gen_class_condition_test.py | 08737f317c88e5a2d5e90aaf043d36dcb2771795 | [] | no_license | populace-org/optimal-filtering-rating | 31d243c06d003584d566e6e9c35b78447b4ebe63 | 84d9d57a746e0a6df7f7626d68c1136d593bf5f8 | refs/heads/master | 2020-12-24T07:26:46.844180 | 2016-07-02T19:35:55 | 2016-07-02T19:35:55 | 62,466,846 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import math
import matplotlib.pyplot as plt
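# Descriptive note: for each k this script plots RHS(p) against LHS(P) over the two accuracy ranges,
# then fixes P = 1.0 and plots LHS - RHS as a function of p, saving one figure per k.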
def LHS(P, m, k):
return (math.pow(1.0 - P, k) / math.pow(P, m-k-1))
def RHS(p, m, k):
return (math.pow(1.0 - p, k+1) / math.pow(p, m-k))
m = 10
P_range = [float(i) / 100.0 for i in xrange(80, 100)]
p_range = [float(i) / 100.0 for i in xrange(60, 80)]
for k in range(0, m + 1):
x = [LHS(P, m, k) for P in P_range]
y = [RHS(p, m, k) for p in p_range]
plt.figure()
plt.plot(x, y, 'o')
plt.plot(x, x)
plt.savefig('p vs P for '+str(k))
plt.close()
P = 1.0
p_range = [float(i) / 100.0 for i in xrange(80, 100)]
for k in range(0, m + 1):
y = [LHS(P, m, k) - RHS(p, m, k) for p in p_range]
plt.figure()
plt.plot(p_range, y)
plt.savefig('LHS - RHS vs p for '+str(k))
plt.close()
| [
"[email protected]"
] | |
697626c30cf184c516e232318ae7b05ae01d178c | bdd3bb5ac2abce8bc16fd7a28826b4a2db17079a | /obs_mapper/obs_plot.py | dc4f9f4b82186e356d0f15303658178b2e6d7747 | [] | no_license | MIT-STARLab/circinus_tools | 9537269c38dc89de51aba0ac250f910ff621e9ea | e18d40766acb26f7074aff317ea96ea54301a788 | refs/heads/master | 2023-07-21T13:24:52.611318 | 2022-01-22T15:05:44 | 2022-01-22T15:05:44 | 249,547,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,585 | py | # Tools for plotting observation targets
#
# @author Kit Kennedy
import json
from collections import namedtuple
import matplotlib.pyplot as plt
from matplotlib.pyplot import savefig
from matplotlib.patches import Rectangle
# need to "pip install pillow" for this import to work
from scipy.misc import imread
GeoPoint = namedtuple('GeoPoint', 'ID lat lon')
GeoRectangle = namedtuple('GeoRectangle', 'ID lat lon h_lat w_lon')
ANNOTATION_X_PAD = 1
ANNOTATION_Y_PAD = 1
def grok_spec_rect(rect,item_uid,spec_option='solid'):
grect = GeoRectangle(
ID = item_uid,
lat = rect['lat_bottom_deg'],
lon = rect['lon_left_deg'],
h_lat = rect['lat_upper_deg'] - rect['lat_bottom_deg'],
w_lon = rect['lon_right_deg'] - rect['lon_left_deg'],
)
gpnts = []
if spec_option == 'points':
num_lat = rect['num_lat_points']
num_lon = rect['num_lon_points']
lat_spacing = (rect['lat_upper_deg'] - rect['lat_bottom_deg']) / num_lat
lon_spacing = (rect['lon_right_deg'] - rect['lon_left_deg']) / num_lon
ID = 0
for lat_indx in range(num_lat):
for lon_indx in range(num_lon):
# disclude this rectangle sub ID if specified
if ID in rect['disclude_points']:
ID += 1
continue
                # put each point in the middle of its bin
lat = rect['lat_bottom_deg'] + lat_spacing*lat_indx + lat_spacing/2
                lon = rect['lon_left_deg'] + lon_spacing*lon_indx + lon_spacing/2
gpnts.append(GeoPoint(ID=ID, lat = lat,lon = lon))
ID += 1
return grect,gpnts
def grok_spec_pnt(pnt,item_uid):
spec_option = pnt['spec_option']
if spec_option == 'default':
return GeoPoint(ID = item_uid,lat = pnt['lat'],lon = pnt['lon'])
elif spec_option == 'ignore':
return None
else:
raise NotImplementedError
def add_annotation(ax,lat,lon,annotation):
xy = (lon+ANNOTATION_Y_PAD,lat+ANNOTATION_X_PAD)
ax.annotate(annotation, xy=xy)
def plot_geo_point(plt,ax,gpnt):
plt.plot(gpnt.lon,gpnt.lat,marker='.',color='black')
add_annotation(ax,gpnt.lat,gpnt.lon,str(gpnt.ID))
def plot_rect(plt,ax,rect, item_uid):
num_points = 0
spec_option = rect['spec_option']
if spec_option == 'solid':
grect,_ = grok_spec_rect(rect,item_uid,spec_option)
rect_patch = Rectangle((grect.lon, grect.lat), grect.w_lon, grect.h_lat,alpha=1,fill=False)
ax.add_patch(rect_patch)
add_annotation(ax,grect.lat,grect.lon,'r'+str(grect.ID))
elif spec_option == 'points':
grect,gpnts = grok_spec_rect(rect,item_uid,spec_option)
for gpnt in gpnts:
plot_geo_point(plt, ax, gpnt)
num_points = len(gpnts)
if num_points > 0:
add_annotation(ax,grect.lat-2,grect.lon-7,'r'+str(grect.ID))
else:
print('Warning: no points found in %s'%(grect))
elif spec_option == 'ignore':
pass
else:
raise NotImplementedError
return num_points
def plot_targets(targets_spec):
plt.figure()
plt.title('Observation target regions and points')
fig = plt.gcf()
fig.set_size_inches(20,16)
plt.axis((-180, 180, -90, 90))
plt.ylabel('Latitude')
plt.xlabel('Longitude')
# plot the earth background
# image lifted from http://www.wavemetrics.com/products/igorpro/dataaccess/gissupport.htm, accessed 5/3/2015
img = imread("world_map.png")
# plot image from bottom left corner
plt.imshow(img,extent=[-180, 180, -90, 90])
ax = plt.gca()
item_uid = 0
# num points is different from number of IDs because some points can be discluded
num_points = 0
rect_items = targets_spec['rectangles']
for rect in rect_items:
num_points += plot_rect(plt,ax,rect, item_uid)
item_uid += 1
pnt_items = targets_spec['points']
for point in pnt_items:
gpnt = grok_spec_pnt(point,item_uid)
if gpnt:
plot_geo_point(plt,ax,gpnt)
num_points += 1
item_uid += 1
print('num_points')
print(num_points)
plt.show()
# savefig('targets.pdf',format='pdf',bbox_inches='tight', transparent="True")
if __name__ == '__main__':
targets_spec_file = 'targets_spec/targets_tropics_land_loose.json'
with open(targets_spec_file,'r') as f:
targets_spec = json.load(f)
plot_targets(targets_spec) | [
"[email protected]"
] | |
913cd021c12d554ac4af6619ce5decfb7ce970c3 | ee89a6682a01f67b753ab9a7d4ddf588e90c5a59 | /src/table_titles.py | 0714f430783075fae91b5bf9d523c4bd8b430ba6 | [] | no_license | wbkd/bpa-wochenberichte-htw | 7ba39429e979580a4b4e2ffee584957fa7e6de25 | 43608de6986c0b846a3d359d0282a4ae1ef5ac8d | refs/heads/master | 2023-06-01T22:31:52.802453 | 2021-06-26T12:26:13 | 2021-06-26T12:26:13 | 379,924,894 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,350 | py | from operator import itemgetter
"""Script to extract all table headers of one pdf, using font styles and sizes to identify headers.
Most parts of the code gratefully taken from https://ichi.pro/de/extrahieren-von-uberschriften-und-absatzen-aus-pdf
-mit-pymupdf-113724360868967
Headers that don't belong to tables can be deleted.
Headers are being saved in a dict with page numbers as keys and the actual headers as values.
The right element tag for the pdfs is h3.
Param doc: PDF document to iterate through of type <class 'fitz.fitz.Document'>
Returns all existing combinations of fonts and their font-sizes in spans of one pdf, sorted by occurrence.
"""
def fonts(doc):
styles = {}
font_counts = {}
for page in doc:
blocks = page.getText("dict")["blocks"]
for b in blocks: # iterate through the text blocks
if b['type'] == 0: # this block contains text
for l in b["lines"]: # iterate through the text lines
for s in l["spans"]: # iterate through the text spans
identifier = "{0}".format(s['size'])
styles[identifier] = {'size': s['size'], 'font': s['font']}
font_counts[identifier] = font_counts.get(identifier, 0) + 1 # count use of font
font_counts = sorted(font_counts.items(), key=itemgetter(1), reverse=True)
return font_counts, styles
def font_tags(font_counts, styles):
"""Returns dictionary with font sizes as keys and tags as value.
:param font_counts: (font_size, count) for all fonts occuring in document
:type font_counts: list
:param styles: all styles found in the document
:type styles: dict
:rtype: dict
:return: all element tags based on font-sizes
"""
p_style = styles[font_counts[0][0]] # get style for most used font by count (paragraph)
p_size = p_style['size'] # get the paragraph's size
# sorting the font sizes high to low, so that we can append the right integer to each tag
font_sizes = []
for (font_size, count) in font_counts:
font_sizes.append(float(font_size))
font_sizes.sort(reverse=True)
# aggregating the tags for each font size
idx = 0
size_tag = {}
for size in font_sizes:
idx += 1
if size == p_size:
idx = 0
size_tag[size] = '<p>'
if size > p_size:
size_tag[size] = '<h{0}>'.format(idx)
elif size < p_size:
size_tag[size] = '<s{0}>'.format(idx)
    # if no <h3> tag was produced, relabel the <h2> tag as <h3>
if not '<h3>' in size_tag.values():
for key, value in size_tag.items():
if '<h2>' == value:
size_tag[key] = '<h3>'
return size_tag
def headers_para(doc, size_tag):
"""Scrapes headers & paragraphs from PDF and return texts with element tags.
:param doc: PDF document to iterate through
:type doc: <class 'fitz.fitz.Document'>
:param size_tag: textual element tags for each size
:type size_tag: dict
:rtype: list
:return: texts with pre-prended element tags
"""
header_dict = {}
page_number = 0
previous_string = {}
first = True
for page in doc:
page_number += 1
blocks = page.getText("dict")["blocks"]
for infos in blocks:
if infos['type'] == 0:
for line in infos["lines"]:
for s in line["spans"]:
if s['text'].strip():
if first:
previous_string = s
first = False
else:
if size_tag[s['size']] == '<h3>' and size_tag[previous_string['size']] == '<h3>':
# If current and previous span is h3: combine them
block_string = previous_string['text'] + s['text'].strip()
test = [block_string.strip()]
if page_number in header_dict: # if key already exists
if type(header_dict[page_number]) == list:
header_dict[page_number].append(
block_string.strip()) # add header as value
else:
# add page number as key, header as value
# header_para[page_number] = block_string
header_dict[page_number] = test
value_list = header_dict[page_number]
if type(value_list) == list and len(value_list) > 1:
del value_list[-2] # delete incomplete header
# For headers of one line only:
elif size_tag[s['size']] == '<h3>': #
if page_number in header_dict:
if type(header_dict[page_number]) == list:
header_dict[page_number].append(s['text'].strip())
else:
header_dict[page_number] = [s['text'].strip()]
previous_string = s # set previous_string to current span
return header_dict
def delete_headers(headers, headers_to_be_deleted):
"""delete those that don't belong to tables (headers_to_be_deleted set in content_one_pdf.py)
"""
keys_to_be_deleted = []
for elem in headers_to_be_deleted:
        for key, value in headers.items(): # iterate through every page with extracted headers
            for i in range(len(value) - 1, -1, -1): # iterate backwards so deleting does not skip headers
                if elem in value[i]:
                    if len(value) == 1: # list consists of only one header: save key to delete later
                        keys_to_be_deleted.append(key)
                    else: # otherwise delete only the one header value
                        del value[i]
for key in keys_to_be_deleted:
headers.pop(key, None)
return headers
| [
"[email protected]"
] | |
b5f5180f9a11d57ae823a912a59eb65def369fcd | 95f0d18cfabeb3806d6a54f80a47aa3b7f996dfb | /newsletter.py | a65ebc63f2e8e0bb01be6a573817946c43907fd5 | [
"MIT"
] | permissive | thedeadwoods/Comradery-API | 14eade52bc41b4b5e2fdbf5746bff468c62c75d0 | 0d30df376f5542d01a04b96c5fdf9363d3103679 | refs/heads/master | 2023-03-23T07:52:27.476182 | 2021-03-06T20:01:36 | 2021-03-06T20:01:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,104 | py | from lionhearted import settings
import os
import django
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lionhearted.settings")
django.setup()
from django.utils.html import strip_tags
from forum.models import Comment, Post, partial_update_objs, Community
from datetime import datetime, timedelta
from django.utils import timezone
from django.conf import settings
from sendgrid.helpers.mail import Mail, From, Personalization, Email, To
from sendgrid import SendGridAPIClient
def send_digest(community, _posts, emails):
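    # Build a per-channel digest (up to 4 public posts per channel) and send it to each recipient through SendGrid.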
posts = []
for p in _posts:
if p.channel and not p.channel.private:
posts.append(p)
if len(posts) <= 0:
return
domain = community.get_domain()
channel_dict = {}
channel_list = []
for p in posts:
if p.channel.get_pretty_name() not in channel_dict:
channel_dict[p.channel.get_pretty_name()] = []
channel_list.append(p.channel)
if len(channel_dict[p.channel.get_pretty_name()]) < 4:
channel_dict[p.channel.get_pretty_name()].append(p)
channel_list.sort(key=lambda x: x.sort)
content_list = []
for channel in channel_list:
ch = channel.get_pretty_name()
post_list = []
for p in channel_dict[ch]:
post_list.append(
{
"title": p.title,
"link": domain + "/post/" + str(p.id),
"author": p.owner.username,
"content": strip_tags(p.content)[:100] + "...",
}
)
content_list.append({"title": ch, "posts": post_list})
dt_data = {
"subject": community.display_name
+ " Community "
+ community.digest_frequency.capitalize()
+ " Digest",
"community": community.display_name,
"logo": community.photo.url if community.photo else None,
"domain": community.get_domain(),
"channels": content_list,
}
for email in emails:
message = Mail(
from_email=From(
"[email protected]", community.display_name + " Community Digest"
),
)
message.to = To(email)
message.template_id = settings.SENDGRID_NEWSLETTER_TEMPLATE_ID
message.dynamic_template_data = dt_data
try:
sg = SendGridAPIClient(settings.SENDGRID_API_KEY)
response = sg.send(message)
print(email)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
print(str(e))
if __name__ == "__main__":
if len(sys.argv) > 2:
c = Community.objects.get(name=sys.argv[1])
posts = c.post_set.all().order_by("-posted")
if sys.argv[2] == "--all":
emails = [person.email for person in c.people.all()]
else:
emails = sys.argv[2:]
send_digest(c, posts, emails)
else:
print("Need community name and email arg")
| [
"[email protected]"
] | |
5d396f8a619172ddd3f61c1c285aedc696426ca7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03001/s613586641.py | 5dadc4795c529eb1e7ffe05c54da04dc2de9168e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | import sys
sys.setrecursionlimit(10**6)
w, h, x, y = map(int, input().split())
ans1 = w*h/2
ans2 = 0
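# A cut of area w*h/2 is achieved by any line through the rectangle's center; it is non-unique only when (x, y) is that center.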
if x == w/2 and y == h/2:
ans2 = 1
print(ans1, ans2)
| [
"[email protected]"
] | |
fa05da510debee69a4af4485d50232ca7c5fa3c1 | a42e38bc94858d2ea0b751b0717b076407bd75ef | /example/example/documents/models.py | d5cd8b0f3c739532f0e66ebd430b9e5cfcc842b3 | [
"MIT"
] | permissive | wolfhechel/django-azurite | 90aca7274649e99e1f3595be422e75ce541644d9 | 48107d1979b480f72d6dad18b4dd2e60c3c19748 | refs/heads/master | 2021-01-22T04:10:09.436420 | 2017-05-17T11:05:43 | 2017-05-17T11:05:43 | 37,257,138 | 0 | 0 | null | 2015-06-11T11:28:29 | 2015-06-11T11:28:28 | null | UTF-8 | Python | false | false | 217 | py | from django.db import models
class Document(models.Model):
name = models.CharField(max_length=50)
file_field = models.FileField(upload_to='files')
def __unicode__(self):
return u'%s' % self.name
| [
"[email protected]"
] | |
e5b0b1f4b9f206b62340e849ea16c8f2270dc880 | 3fdf00da8c02d8498aa594b763d455ff11bc65e1 | /generate.py | be9a80aea94240c6608bab6d6ba09c99d010c989 | [] | no_license | prabormukherjee/BERT-keyphrase-extraction | e3e2c1db05660a4383f9953f2c81bb68f49607dc | c45115447d91179a99e4ce6e2f1c426841a3fdbe | refs/heads/master | 2023-01-28T13:08:23.045704 | 2020-12-13T13:05:45 | 2020-12-13T13:05:45 | 315,028,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,019 | py | """Generate Key Phrase of the model"""
import argparse
import random
import logging
import os
import numpy as np
import torch
#from pytorch_pretrained_bert import BertForTokenClassification, BertConfig
from transformers import BertForTokenClassification, BertConfig, BertTokenizer
from evaluate import evaluate
import utils
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='./BERT-keyphrase-extraction/data/msra/', help="Directory containing the dataset")
parser.add_argument('--bert_model_dir', default='bert-base-uncased', help="Directory containing the BERT model in PyTorch")
parser.add_argument('--model_dir', default='./BERT-keyphrase-extraction/experiments/base_model', help="Directory containing params.json")
parser.add_argument('--seed', type=int, default=23, help="random seed for initialization")
parser.add_argument('--restore_file', default='best', help="name of the file in `model_dir` containing weights to load")
parser.add_argument('--multi_gpu', default=False, action='store_true', help="Whether to use multiple GPUs if available")
parser.add_argument('--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--test_file', default = './BERT-keyphrase-extraction/h1_7.txt', help = 'path to test file' )
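# Example invocation (sketch; relies on the argparse defaults above):
#   python generate.py --test_file ./BERT-keyphrase-extraction/h1_7.txt --restore_file best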
def load_test_sentences(bert_model_dir,sentences_file):
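    # Map each whitespace-separated token of every line to its BERT vocabulary id (the file is assumed to be pre-tokenized).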
test_sentences = []
tokenizer = BertTokenizer.from_pretrained(bert_model_dir, do_lower_case=True)
with open(sentences_file, 'r',encoding='utf-8') as file:
for cnt,line in enumerate(file):
tokens = line.split()
test_sentences.append(tokenizer.convert_tokens_to_ids(tokens))
return test_sentences
def yield_data_batch(test_sentences ,params):
for i in range(len(test_sentences)//params.batch_size):
# fetch sentences and tags
sentences = test_sentences[i*params.batch_size:(i+1)*params.batch_size]
# batch length
batch_len = len(sentences)
# compute length of longest sentence in batch
batch_max_len = max([len(s) for s in sentences])
max_len = min(batch_max_len, params.max_len)
# prepare a numpy array with the data, initialising the data with pad_idx
batch_data = 0 * np.ones((batch_len, max_len))
# copy the data to the numpy array
for j in range(batch_len):
cur_len = len(sentences[j])
if cur_len <= max_len:
batch_data[j][:cur_len] = sentences[j]
else:
batch_data[j] = sentences[j][:max_len]
# since all data are indices, we convert them to torch LongTensors
batch_data = torch.tensor(batch_data, dtype=torch.long)
# shift tensors to GPU if available
batch_data = batch_data.to(params.device)
yield batch_data
def predict(model, data_iterator, params, sentences_file):
"""Evaluate the model on `steps` batches."""
# set model to evaluation mode
model.eval()
pred_words = []
pred_pos = []
print('Starting Evaluation')
for _ in range(params.eval_steps):
# fetch the next evaluation batch
batch_data= next(data_iterator)
batch_masks = batch_data.gt(0)
batch_output = model(batch_data, token_type_ids=None, attention_mask=batch_masks) # shape: (batch_size, max_len, num_labels)
batch_output = batch_output[0].detach().cpu()
batch_masks = batch_masks.detach().cpu().numpy().astype('uint8')
_, indices = torch.max(batch_output,2)
for i,idx in enumerate(indices.detach().numpy()):
#batch_predict.append(batch_data[i,idx==1 and batch_masks[i,:] == True].detach().cpu().numpy())
pred_pos.append([a and b for a,b in zip(idx, batch_masks[i])])
output = []
with open(sentences_file, 'r',encoding='utf-8') as file:
for cnt,(line,p) in enumerate(zip(file, pred_pos)):
line = line.split()
out = [line[i] for i in range(len(line)) if p[i]>0]
if out:
output.extend(out)
#print(output)
with open('output.txt', 'w') as f:
f.write("%s " % output)
    print('output flushed to disk')
print('Done')
if __name__ == '__main__':
args = parser.parse_args()
# Load the parameters from json file
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
# Use GPUs if available
params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
params.n_gpu = torch.cuda.device_count()
params.multi_gpu = args.multi_gpu
# Set the random seed for reproducible experiments
random.seed(args.seed)
torch.manual_seed(args.seed)
if params.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed) # set random seed for all GPUs
params.seed = args.seed
test_sentences = load_test_sentences(args.bert_model_dir,args.test_file)
# Specify the test set size
params.test_size = len(test_sentences)
params.eval_steps = params.test_size // params.batch_size
# Define the model
# config_path = os.path.join(args.bert_model_dir, 'config.json')
config_path = os.path.join('./BERT-keyphrase-extraction', 'config.json')
config = BertConfig.from_json_file(config_path)
#update config with num_labels
config.update({"num_labels":2})
model = BertForTokenClassification(config)
#model = BertForTokenClassification(config, num_labels=2)
model.to(params.device)
# Reload weights from the saved file
utils.load_checkpoint(os.path.join(args.model_dir, args.restore_file + '.pth.tar'), model)
if args.fp16:
model.half()
if params.n_gpu > 1 and args.multi_gpu:
model = torch.nn.DataParallel(model)
predict(model = model, data_iterator = yield_data_batch(test_sentences ,params), params = params, sentences_file=args.test_file) | [
"[email protected]"
] | |
76dfebb655f45b53d778e40b6ae290fc76785090 | 09f0505f3ac1dccaf301c1e363423f38768cc3cc | /r_DailyProgrammer/Intermediate/C239/__init__.py | cf2036a70032f331ee1707580700ecc8e93ea54f | [] | no_license | Awesome-Austin/PythonPractice | 02212292b92814016d062f0fec1c990ebde21fe7 | 9a717f91d41122be6393f9fcd1a648c5e62314b3 | refs/heads/master | 2023-06-21T11:43:59.366064 | 2021-07-29T23:33:00 | 2021-07-29T23:33:00 | 270,854,302 | 0 | 0 | null | 2020-08-11T20:47:10 | 2020-06-08T23:24:09 | Python | UTF-8 | Python | false | false | 71 | py | #! python3
from r_DailyProgrammer.Intermediate.C239.main import main
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
054d4ae02061323bc123c1d0faad5aec890d1a87 | b9503e86921041609763c65f148c08ecff69cc4b | /dojo_ninjas_app/models.py | 9da7e8535bda9583ef770e1a7765cb4f707b8e21 | [] | no_license | cristian-nav/Dojo-y-ninjas | c1371e7b4c74b4ee127f3734e68f30fdcc80c623 | d06016bf2e6f5adc7d20806f0031c04a978c769c | refs/heads/main | 2023-02-25T06:11:44.707956 | 2021-02-11T18:44:58 | 2021-02-11T18:44:58 | 338,104,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | from django.db import models
class Dojo(models.Model):
name = models.CharField(max_length=255)
city = models.CharField(max_length=255)
state = models.CharField(max_length=2)
desc = models.CharField(("dojo antiguo"), max_length=50)
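# Each Ninja belongs to exactly one Dojo; deleting a Dojo cascades to its ninjas.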
class Ninja(models.Model):
dojo = models.ForeignKey(Dojo, related_name="ninjas", on_delete = models.CASCADE)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
| [
"[email protected]"
] | |
2aedeb2b23073928b3f8b67e3cc1d7b1e29ec108 | b237ba12ed0b825490c5616eb4726a22792f34bd | /package/scripts/turtlesim_go_to_2.py | 056519625430a6d429a70c000a18973ce3d956ee | [] | no_license | JoelsonMiller/trabalhos | 76b45279bd02abc86337c0015dd87c90ae4d8cd1 | d30e4664576c05382c9b779e38884d0cc22e05c5 | refs/heads/master | 2021-07-12T19:26:27.099620 | 2020-08-05T13:19:53 | 2020-08-05T13:19:53 | 191,399,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | #!/usr/bin/env python
import rospy
from turtlesim.msg import Pose
from geometry_msgs.msg import Twist
import math
pub = rospy.Publisher("/turtle1/cmd_vel", Twist, queue_size=10)
goal_x = 3.0
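# Drive the turtle along x toward goal_x: publish +/-0.5 m/s on /turtle1/cmd_vel until the goal is reached, then stop.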
def callback(data):
#print("A pose em x = " + str(data.x) + " y = " + str(data.y) + " theta = " + str(data.theta))
twist = Twist()
if((goal_x - data.x) < 0):
if(data.x > goal_x):
twist.linear.x = -0.5
else:
twist.linear.x = 0.0
elif((goal_x - data.x) > 0):
if(data.x < goal_x):
twist.linear.x = 0.5
else:
twist.linear.x = 0.0
else:
twist.linear.x = 0.0
pub.publish(twist)
def control_turtlesim():
rospy.init_node("control", anonymous=True)
rospy.Subscriber("/turtle1/pose", Pose, callback)
rospy.spin()
if __name__ == "__main__":
control_turtlesim()
| [
"[email protected]"
] | |
410dcad216373dd96905200a44c6f6e4a4a9826e | e1da5e8320113ce0affee1daa9f6ad94c26e9441 | /lab3/3.2.py | 2166b23b2fcfe74b51be1609214fe728243e4f3a | [] | no_license | cheretka/Fundamentals-of-Artificial-Intelligence | 5f5efc119609ca4b131fbb217eefe1ce84aa7e16 | d2949d9ab76e33651b24a696b2543101d540a9ce | refs/heads/master | 2023-05-14T03:32:11.485401 | 2021-05-25T16:58:58 | 2021-05-25T17:13:11 | 370,770,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | import numpy as np
def relu(x):
return (x > 0) * x
def relu2deriv(output):
return output>0
input = np.array([[8.5, 0.65, 1.2],
[9.5, 0.8, 1.3],
[9.9, 0.8, 0.5],
[9.0, 0.9, 1.0]])
layer_1_weights = np.array([[0.1, 0.2, -0.1],
[-0.1, 0.1, 0.9],
[0.1, 0.4, 0.1]])
layer_2_weights = np.array([[0.3, 1.1, -0.3],
[0.1, 0.2, 0.0],
[0.0, 1.3, 0.1]])
expected_output = np.array([[0.1, 1, 0.1],
[0, 1, 0],
[0, 0, 0.1],
[0.1, 1, 0.2]])
alpha = 0.01
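# Train a 3-3-3 network (ReLU hidden layer, linear output) by per-sample gradient descent, printing the summed squared error each iteration.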
for iteration in range(10000):
error = 0
for i in range(len(input)):
layer_1_values = relu(np.dot(input[i], layer_1_weights.T))
layer_2_values = np.dot(layer_1_values, layer_2_weights.T)
layer_2_delta = layer_2_values - expected_output[i]
layer_1_delta = np.dot(layer_2_delta, layer_2_weights) * relu2deriv(layer_1_values)
layer_2_weight_delta = np.outer(layer_2_delta, layer_1_values)
layer_1_weight_delta = np.outer(layer_1_delta, input[i])
layer_2_weights = layer_2_weights - np.dot(alpha, layer_2_weight_delta)
layer_1_weights = layer_1_weights - np.dot(alpha, layer_1_weight_delta)
error = error + (layer_2_values - expected_output[i]) ** 2
print("error " + str(sum(error))) | [
"[email protected]"
] | |
9236eff7dea85e348b76f3d349db72fe24ee20c1 | 52bbc07d59b003342a0920da47b20cb4a52385c4 | /node.py | 515f5e395046f4bbf7bf1799140358081ad58036 | [] | no_license | juliemyhu/HB-DS | 281aeefdf57a0ffe6ec680cacbf49716a88403a4 | a13846647eb6c266ccf7bca2d71cf1da59b36e05 | refs/heads/master | 2023-03-23T08:54:52.852559 | 2021-03-08T23:59:59 | 2021-03-08T23:59:59 | 292,702,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | class Node():
def __init__(self,data):
self.data = data
self.next = None
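# Node for a doubly linked list: keeps references to both the next and previous nodes.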
class DLinkedNode():
    def __init__(self, data):
self.data = data
self.next = None
self.prev = None | [
"[email protected]"
] |