# ==== /Euler_1_12.py  (repo: MikeOcc/MyProjectEulerFiles) ====
sum=0
x = 500*499/2
maxum = 0
maxsum = 0
bestrow = 0
rstart = 12375
estart = 12377
showall = False
for r in range(rstart,estart):
trinum = r*(r+1)/2
#trinum = r
sum=0
if trinum%2==0:
endrange = (trinum/2) + 1
elif trinum%3==0:
endrange = (trinum/3) + 1
elif trinum%5==0:
endrange = (trinum/5) + 1
# elif trinum%7==0:
# endrange = (trinum/7) + 1
# elif trinum%9==0:
# endrange = (trinum/9) + 1
else:
continue
print r
for i in range(1,endrange):
#z=float(trinum)/float(i)
#print i, "z = ",z
if trinum%i==0:
sum = sum + 1
#print i,float(trinum)/float(i)
sum = sum + 1
#print "maxsum ", maxsum
if showall == True or sum > maxsum:
print "\nRow:", r, ",triangle number :",trinum, ", number of factors:",sum
if sum > maxsum:
maxsum = sum
maxum = trinum
bestrow = r
if maxsum > 500:break;
print "\nFor r =", bestrow, ", the number of factors =",maxsum, "for triangle number =", maxum
#6720 - 384, 6546
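# Editor's sketch (Python 3, not from the repository above): the same Project
# Euler 12 search runs far faster when the divisor count comes from the prime
# factorisation instead of trial division over every candidate divisor.
# Function names are illustrative only.
def divisor_count(n):
    # multiply (exponent + 1) over the prime factorisation of n
    count, p = 1, 2
    while p * p <= n:
        exp = 0
        while n % p == 0:
            n //= p
            exp += 1
        count *= exp + 1
        p += 1
    if n > 1:          # one prime factor left over
        count *= 2
    return count

def first_triangle_over(limit=500):
    r = 1
    while True:
        trinum = r * (r + 1) // 2
        if divisor_count(trinum) > limit:
            return r, trinum
        r += 1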

# ==== /gofedinfra/system/helpers/artefactkeygenerator/golangprojectdistributioncontracts.py  (repo: gofed/infra) ====
from infra.system.core.meta.metaartefactkeygenerator import MetaArtefactKeyGenerator
import logging
class GolangProjectDistributionContractsKeyGenerator(MetaArtefactKeyGenerator):
def generate(self, data, delimiter = ":"):
# return a list of fields
keys = []
for key in ["artefact", "product", "distribution", "build", "rpm"]:
if key not in data:
raise ValueError("golang-project-distribution-contracts: %s key missing" % key)
keys.append(self.truncateKey(data[key]))
return keys

# ==== /rand_param_envs/gym/envs/classic_control/mountain_car.py  (repo: CHEN-yongquan/mier_public) ====
"""
https://webdocs.cs.ualberta.ca/~sutton/MountainCar/MountainCar1.cp
"""
import math
from rand_param_envs import gym
from rand_param_envs.gym import spaces
from rand_param_envs.gym.utils import seeding
import numpy as np
class MountainCarEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self):
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
self.low = np.array([self.min_position, -self.max_speed])
self.high = np.array([self.max_position, self.max_speed])
self.viewer = None
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(self.low, self.high)
self._seed()
self.reset()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
position, velocity = self.state
velocity += (action - 1) * 0.001 + math.cos(3 * position) * (-0.0025)
velocity = np.clip(velocity, -self.max_speed, self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if (position == self.min_position and velocity < 0): velocity = 0
done = bool(position >= self.goal_position)
reward = -1.0
self.state = (position, velocity)
return np.array(self.state), reward, done, {}
def _reset(self):
self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
return np.array(self.state)
def _height(self, xs):
return np.sin(3 * xs) * .45 + .55
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 600
screen_height = 400
world_width = self.max_position - self.min_position
scale = screen_width / world_width
carwidth = 40
carheight = 20
if self.viewer is None:
from rand_param_envs.gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs - self.min_position) * scale, ys * scale))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle(carheight / 2.5)
frontwheel.set_color(.5, .5, .5)
frontwheel.add_attr(rendering.Transform(translation=(carwidth / 4, clearance)))
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle(carheight / 2.5)
backwheel.add_attr(rendering.Transform(translation=(-carwidth / 4, clearance)))
backwheel.add_attr(self.cartrans)
backwheel.set_color(.5, .5, .5)
self.viewer.add_geom(backwheel)
flagx = (self.goal_position - self.min_position) * scale
flagy1 = self._height(self.goal_position) * scale
flagy2 = flagy1 + 50
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)])
flag.set_color(.8, .8, 0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation((pos - self.min_position) * scale, self._height(pos) * scale)
self.cartrans.set_rotation(math.cos(3 * pos))
return self.viewer.render(return_rgb_array=mode == 'rgb_array')

# ==== /Rakesh/merge-sorting/merge_sort_code.py  (repo: rakeshsukla53/interview-preparation) ====
def mergeSort(alist):
print("Splitting ",alist)
if len(alist)>1:
mid = len(alist)//2
lefthalf = alist[:mid]
righthalf = alist[mid:]
mergeSort(lefthalf)
mergeSort(righthalf)
i = 0
j = 0
k = 0
while i < len(lefthalf) and j < len(righthalf):
if lefthalf[i] < righthalf[j]:
alist[k]=lefthalf[i]
i=i+1
else:
alist[k]=righthalf[j]
j=j+1
k=k+1
while i < len(lefthalf):
alist[k]=lefthalf[i]
i=i+1
k=k+1
while j < len(righthalf):
alist[k]=righthalf[j]
j=j+1
k=k+1
print("Merging ",alist)
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
mergeSort(alist)
print(alist)
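# For the list above, the final print shows the fully sorted list:
# [17, 20, 26, 31, 44, 54, 55, 77, 93]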

# ==== /py200912b_python2m6/day14_201212/homework/kevin/stem1402b_python_homework_12_KEvin (1).py  (repo: edu-athensoft/stem1401python_student) ====
"""
1. Read an HTML file, and copy all content into a new file
2. Read a CSV file, and copy all content into a new file
3. Read a CSV file, and copy its first half lines into a new file
"""
# Question 1.
file = open("myweb.html")
content = file.read()
file.close()
file = open("myweb.txt", 'w')
file.write(content)
file.close()
print("Content has been written")
# Question 2.
file = open("business-price-indexes-september-2020-quarter-corrections-to-previously-published-statistics.csv")
content = file.read()
file.close()
file = open("business-price-indexes-september-2020-quarter-corrections-to-previously-published-statistics.txt", 'w')
file.write(content)
file.close()
print("Content has been written")
# Question 3.
file = open("business-price-indexes-september-2020-quarter-corrections-to-previously-published-statistics.csv")
num = len(file.readlines())
if num % 2 == 0:
line = int(num / 2)
print(line)
elif num % 2 == 1:
line = int(num + 1)
line = int(line / 2)
print(line)
file.seek(0)
file2 = open("csvmodified.txt", 'w')
file2.close()
for i in range(line):
content = file.readline()
file3 = open("csvmodified.txt", 'a')
file3.write(content)
file3.close()
file.close()
print("Content has been written")

# ==== /kubernetes/client/models/v1beta1_pod_disruption_budget.py  (repo: TokkoLabs/client-python) ====
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1PodDisruptionBudget(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1PodDisruptionBudgetSpec',
'status': 'V1beta1PodDisruptionBudgetStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1beta1PodDisruptionBudget - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""
Gets the api_version of this V1beta1PodDisruptionBudget.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1PodDisruptionBudget.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1PodDisruptionBudget.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1PodDisruptionBudget.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta1PodDisruptionBudget.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1PodDisruptionBudget.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1PodDisruptionBudget.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1PodDisruptionBudget.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1PodDisruptionBudget.
:return: The metadata of this V1beta1PodDisruptionBudget.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1PodDisruptionBudget.
:param metadata: The metadata of this V1beta1PodDisruptionBudget.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1beta1PodDisruptionBudget.
Specification of the desired behavior of the PodDisruptionBudget.
:return: The spec of this V1beta1PodDisruptionBudget.
:rtype: V1beta1PodDisruptionBudgetSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1beta1PodDisruptionBudget.
Specification of the desired behavior of the PodDisruptionBudget.
:param spec: The spec of this V1beta1PodDisruptionBudget.
:type: V1beta1PodDisruptionBudgetSpec
"""
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1beta1PodDisruptionBudget.
Most recently observed status of the PodDisruptionBudget.
:return: The status of this V1beta1PodDisruptionBudget.
:rtype: V1beta1PodDisruptionBudgetStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1beta1PodDisruptionBudget.
Most recently observed status of the PodDisruptionBudget.
:param status: The status of this V1beta1PodDisruptionBudget.
:type: V1beta1PodDisruptionBudgetStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1PodDisruptionBudget):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other

# ==== /Nonsense/sqlite3_select_variations.py  (repo: ChocolatePadmanaban/Cooking_Scheduler) ====
import sqlite3
db_filename = 'todo.db'
with sqlite3.connect(db_filename) as conn :
cursor = conn.cursor()
cursor.execute("""
select name, description, deadline from project
where name = 'pymotw'
""")
name, description, deadline = cursor.fetchone()
print('\nProject details for {} ({}) \n due {}'.format(
description, name, deadline
))
cursor.execute("""
select id, priority, details, status, deadline from task
where project = 'pymotw' order by deadline
""")
print('\nNext 5 tasks')
for row in cursor.fetchmany(5):
task_id, priority, details, status, deadline = row
print('{:2d} [{:d}] {:<25} [{:<8}] ({})'.format(
task_id, priority, details, status, deadline
        ))
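# Editor's note: todo.db itself is not part of this snippet. A minimal schema
# that these SELECTs would run against (table and column names taken from the
# queries above; the types are assumed) is:
#
#   CREATE TABLE project (
#       name        TEXT PRIMARY KEY,
#       description TEXT,
#       deadline    DATE
#   );
#   CREATE TABLE task (
#       id        INTEGER PRIMARY KEY AUTOINCREMENT,
#       priority  INTEGER,
#       details   TEXT,
#       status    TEXT,
#       deadline  DATE,
#       project   TEXT REFERENCES project (name)
#   );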

# ==== /171-excel sheet column number.py  (repo: JinnieJJ/leetcode) ====
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
base = ord('A') - 1
n = len(s)
result = 0
for i in range(n):
result += (ord(s[n - 1 - i]) - base) * pow(26, i)
return result
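# Example: column titles map as "A" -> 1, "Z" -> 26, "AA" -> 27, "AB" -> 28,
# i.e. base-26 with A = 1, so Solution().titleToNumber("AB") == 28.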

# ==== /sites/kotourism/interop/admin.py  (repo: alexgula/django_sites) ====
# coding=utf-8
from django.contrib import admin
from .models import Log
class LogAdmin(admin.ModelAdmin):
list_display = ('action_date', 'success', 'results', )
search_fields = ('results', )
date_hierarchy = 'action_date'
list_filter = ('success', )
admin.site.register(Log, LogAdmin)

# ==== /test/test_app.py  (repo: Quastrado/project_o_mail_service) ====
import pytest
from flask_sqlalchemy import SQLAlchemy
from owl_mail.models import db  # assumption: the models module exposes the shared SQLAlchemy instance used below
from owl_mail import create_app
from owl_mail.models import User
@pytest.fixture
def app():
app=create_app()
return app
def test_app(client):
response = client.get('/menu')
assert response.status_code == 200
@pytest.fixture(scope='module')
def init_database():
db.create_all()
user1 = User('Ghost', 'invisible', 'admin')
user2 = User('Vagabound', 'danger', 'user')
db.session.add(user1)
db.session.add(user2)
yield db
    db.drop_all()

# ==== /python/testData/inspections/PyStringFormatInspection/NewStylePackedFunctionCall.py  (repo: JetBrains/intellij-community) ====
def f():
return dict(foo=0)
'{foo}'.format(**f())
<warning descr="Too few arguments for format string">"{}"</warning>.format()

# ==== /fem-sim/utils/compute_tf_jacobian_models.py  (repo: itsvismay/research-experiments) ====
import numpy
def generate_jacobian_for_tf_model(model_input_path, jacobian_output_path):
import tensorflow as tf
from tensorflow_forward_ad import forward_gradients
from tensorflow.python.platform import gfile
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.util import compat
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
def body(y, x, i):
n = tf.shape(y)[0]
loop_vars = [
tf.constant(0, tf.int32),
tf.TensorArray(tf.float32, size=n),
]
_, jacobian = tf.while_loop(
lambda j, _: j < n,
lambda j, result: (j+1, result.write(j, tf.gradients(y[j], x)[0][i])),
loop_vars)
return jacobian.stack()
def tf_jacobian(y, x, n):
loop_vars = [
tf.constant(0, tf.int32),
tf.TensorArray(tf.float32, size=n),
]
_, jacobian = tf.while_loop(
lambda i, _: i < n,
lambda i, result: (i+1, result.write(i, body(y[i], x, i))),
loop_vars)
return jacobian.stack()
with gfile.FastGFile(model_input_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
sess = tf.Session()
input_node = sess.graph.get_tensor_by_name("decoder_input:0")
output_node = sess.graph.get_tensor_by_name("output_node0:0")
jacobians = tf_jacobian(output_node, input_node, 1)
# tf.train.write_graph(jacobians.as_graph_def(), "./", "test_jac")
# from tensorflow.python.framework import graph_util
# from tensorflow.python.framework import graph_io
# # print("pred_node_names", pred_node_names)
# constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), jacobians.name)
# graph_io.write_graph(constant_graph, output_fld, output_path, as_text=False)
# print('saved the freezed graph (ready for inference) at: ', osp.join(output_fld, output_path))
#print(sess.graph.as_graph_def())
subgraph = tf.graph_util.extract_sub_graph(sess.graph.as_graph_def(), ["decoder_input", jacobians.name[:-2]])
graph_io.write_graph(subgraph, "./", jacobian_output_path, as_text=False)
# print(subgraph)
# print(jacobians.name)
# print(output_node.name)
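# Hypothetical usage (editor's note; the file names are illustrative only):
#
#   generate_jacobian_for_tf_model("decoder.pb", "decoder_jacobian.pb")
#
# The frozen input graph must already contain tensors named "decoder_input:0"
# and "output_node0:0", which the function looks up above.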

# ==== /plugin/src/test/resources/highlighting/assignmentTargetWith.py  (repo: consulo/consulo-python) ====
with open("") as <error descr="can't assign to operator">my_<<var</error>:
    pass

# ==== /imooc/python3_shizhan/nine/__init__.py  (repo: TesterCC/Python3Scripts) ====
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = 'TesterCC'
# __time__ = '17/10/30 23:58'

# ==== /ProjectEuler/UC solutions/Successful 101-150/Q107.py  (repo: stankiewiczm/contests) ====
from Numeric import *
#NET = [zeros(40)]*40;
TOT = 0; netses = []; NETS = [];
for line in file("../TXTdata/network.txt"):
netses.append(line);
for line in netses:
sq = list(); N = 0;
for ch in line:
if (ch in ['0','1','2','3','4','5','6','7','8','9']):
N = 10*N+int(ch);
if (ch == ','):
if (N == 0):
sq.append(0);
else:
sq.append(N);
TOT += N;
N = 0;
if (N != 0):
sq.append(N);
TOT += N;
NETS.append(sq);
print NETS
Cnt = 0;
CNC = [];
for A in arange(len(NETS)):
CNC.append([]);
for B in arange(len(NETS)):
CNC[A].append(0);
CNC[A][A] = 1;
LAST = 1; DoneL = list(); Good = True; NEWL = 0;
while (Cnt < len(NETS)-1):
MIN = 10000; Mi = 0; Mj = 0;
for i in arange(len(NETS)):
for j in arange(i):
if (NETS[i][j] < MIN) and (NETS[i][j] >= LAST):
if (100*i+j) not in DoneL:
MIN = NETS[i][j];
Mi = i;
Mj = j;
if (CNC[Mi][Mj] == 0):
CNC[Mi][Mj] = 1; CNC[Mj][Mi] = 1;
print Cnt,":", Mi,Mj,MIN;
Cnt += 1;
DoneL.append(100*Mi+Mj)
NEWL += MIN;
for a in arange(len(NETS[Mi])):
for b in arange(len(NETS[Mj])):
if (CNC[a][Mi] == 1) and (CNC[b][Mj] == 1):
CNC[a][b] = 1; CNC[b][a] = 1;
else:
print "Completed a failed pass", MIN, Mi, Mj
DoneL.append(100*Mi+Mj)
print TOT/2, NEWL, TOT/2-NEWL;
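# Editor's sketch (Python 3, not from the contest repository): the same
# saving over the network can be computed with Kruskal's algorithm and a
# union-find. It assumes network.txt holds the comma-separated weight matrix
# read above, with "-" or an empty cell where no edge exists.
def mst_saving(path="../TXTdata/network.txt"):
    with open(path) as fh:
        rows = [line.strip().split(",") for line in fh]
    n = len(rows)
    edges, total = [], 0
    for i in range(n):
        for j in range(i):          # each undirected edge counted once
            cell = rows[i][j]
            if cell not in ("", "-"):
                edges.append((int(cell), i, j))
                total += int(cell)
    parent = list(range(n))

    def find(x):
        # union-find with path halving
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    kept = 0
    for w, i, j in sorted(edges):
        ri, rj = find(i), find(j)
        if ri != rj:                # edge joins two components: keep it
            parent[ri] = rj
            kept += w
    return total - kept             # weight that can be removed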

# ==== /programming_basics/EXAM_PREPARATION/6_baking_competition.py  (repo: ivo-bass/SoftUni-Solutions) ====
sweets_prices = {"cookies": 1.50, "cakes": 7.80, "waffles": 2.30}
sweets_count_total = {"cookies": 0, "cakes": 0, "waffles": 0}
participants_count = int(input())
for _ in range(participants_count):
sweets_per_participant = {"cookies": 0, "cakes": 0, "waffles": 0}
baker = input()
sweets_type = input()
while sweets_type != "Stop baking!":
sweets_count = int(input())
sweets_per_participant[sweets_type] += sweets_count
sweets_count_total[sweets_type] += sweets_count
sweets_type = input()
print(f'{baker} baked '
f'{sweets_per_participant["cookies"]} cookies, '
f'{sweets_per_participant["cakes"]} cakes and '
f'{sweets_per_participant["waffles"]} waffles.')
total_sold = sweets_count_total["cookies"] + \
sweets_count_total["cakes"] + \
sweets_count_total["waffles"]
total_sum = sweets_count_total["cookies"] * sweets_prices["cookies"] + \
sweets_count_total["cakes"] * sweets_prices["cakes"] + \
sweets_count_total["waffles"] * sweets_prices["waffles"]
print(f"All bakery sold: {total_sold}")
print(f"Total sum for charity: {total_sum:.2f} lv.")

# ==== /blender/arm/logicnode/action_set_visible.py  (repo: ForestCSharp/armory) ====
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SetVisibleNode(Node, ArmLogicTreeNode):
'''Set visible node'''
bl_idname = 'LNSetVisibleNode'
bl_label = 'Set Visible'
bl_icon = 'QUESTION'
def init(self, context):
self.inputs.new('ArmNodeSocketAction', 'In')
self.inputs.new('ArmNodeSocketObject', 'Object')
self.inputs.new('NodeSocketBool', 'Bool')
self.outputs.new('ArmNodeSocketAction', 'Out')
add_node(SetVisibleNode, category='Action')

# ==== /stacks-and-queues/stack-and-queue-NEW/dynamicArrayQueue/main.py  (repo: teknofage/CS-1.3-Core-Data-Structures) ====
from Queue import Queue
'''#CREATE a queue
#front at index 0
#back at index n - 1
my_queue = []
#UPDATE, ADD
#enqueue
my_queue.append("A")
my_queue.append("B")
my_queue.append("C")
#DELETE
#dequeue
my_queue.pop(0)
#READ
#front
print(my_queue[0])'''
#CREATE
my_queue = Queue()
my_queue.enqueue("A")
#["A"]
my_queue.enqueue("B")
#["A", "B"]
my_queue.enqueue("C")
#["A", "B", "C"]
print(my_queue.front())
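# Editor's sketch: Queue.py is not included in this dump. A minimal dynamic
# array queue matching the enqueue()/front() calls above could look like
# this (method names taken from the usage; everything else assumed):
class Queue:
    def __init__(self):
        self.items = []

    def enqueue(self, item):
        # add to the back: O(1) amortised
        self.items.append(item)

    def dequeue(self):
        # remove from the front: O(n) for a plain dynamic array
        return self.items.pop(0)

    def front(self):
        # read the front item without removing it
        return self.items[0]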

# ==== /Python_codes/p02934/s538390286.py  (repo: Aasthaengg/IBMdataset) ====
#!/usr/bin/env python3
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
def main():
N = int(readline())
As = map(int, readline().split())
ans = 0
for a in As:
ans += 1 / a
print(1/ans)
if __name__ == '__main__':
main()

# ==== /Code/CodeRecords/2638/60836/314330.py  (repo: AdamZhouSE/pythonHomework) ====
"""
Line 1 contains two positive integers N and M: the number of reals in the sequence and the number of operations.
Line 2 contains N reals; the i-th of them is the i-th element of the sequence.
Each of the next M lines is one operation, in one of the following forms:
Operation 1: "1 x y k", add the real k to every element from position x through y.
Operation 2: "2 x y", print the average of the sub-sequence from position x through y.
Operation 3: "3 x y", print the variance of the sub-sequence from position x through y.
5 5
1 5 4 2 3
2 1 4
3 1 5
1 1 1 1
1 2 2 -1
3 1 5
"""
NM=[int(m) for m in str(input()).split(" ")]
N=NM[0]
M=NM[1]
arr=[int(m) for m in str(input()).split(" ")]
instruction=[]
for i in range(M):
instruction.append([int(m) for m in str(input()).split(" ")])
for i in range(M):
if(instruction[i][0]==1):
x=instruction[i][1]-1
y=instruction[i][2]-1
k=instruction[i][3]
while(x<=y):
arr[x]+=k
x+=1
if(instruction[i][0]==2):
x = instruction[i][1] - 1
y = instruction[i][2] - 1
print('%.4f' % (sum(arr[x:y+1])/float(y-x+1)))
if(instruction[i][0]==3):
x = instruction[i][1] - 1
y = instruction[i][2] - 1
s = y - x + 1
E=sum(arr[x:y+1])/float(s)
first=0
while(x<=y):
first+=pow(arr[x]-E,2)
x+=1
        print('%.4f' % (first/s))

# ==== /CMS/Zope-3.2.1/Dependencies/zope.app-Zope-3.2.1/zope.app/applicationcontrol/zopeversion.py  (repo: germanfriday/code-examples-sandbox) ====
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Utility to retrieve the Zope version.
$Id: zopeversion.py 26884 2004-08-03 21:31:09Z fdrake $
"""
__docformat__ = 'restructuredtext'
import os
import re
import zope.app
from zope.app.applicationcontrol.interfaces import IZopeVersion
from zope.interface import implements
class ZopeVersion(object):
implements(IZopeVersion)
__entries = re.compile(r'(url|revision)\s*=\s*"([^"]+)"')
__tags = re.compile(r'/(tags|branches)/([^/]+)/')
def __init__(self, path=None):
if path is None:
path = os.path.dirname(os.path.abspath(zope.app.__file__))
self.path = path
self.result = None
def getZopeVersion(self):
"""See zope.app.applicationcontrol.interfaces.IZopeVersion"""
if self.result is not None:
return self.result
self.result = "Development/Unknown"
# is this a SVN checkout?
svndir = os.path.join(self.path, ".svn")
if os.path.isdir(svndir):
self.__setSVNVersion(svndir)
else:
# try to get official Zope release information
versionfile = os.path.join(self.path, "version.txt")
if os.path.isfile(versionfile):
f = file(versionfile)
self.result = f.readline().strip() or self.result
f.close()
return self.result
def __setSVNVersion(self, svndir):
entriesfile = os.path.join(svndir, "entries")
# get the version information
if os.path.isfile(entriesfile):
f = file(entriesfile)
url, revision = "", ""
for line in f:
match = self.__entries.search(line)
if match is not None:
name, value = match.group(1, 2)
if name == "url":
url = value
elif name == "revision":
revision = value
if url and revision:
break
f.close()
if revision and url:
match = self.__tags.search(url)
tag = ""
if match is not None:
type, value = match.group(1, 2)
if type == "tags":
tag = "/Tag: %s" % value
elif type == "branches":
tag = "/Branch: %s" % value
self.result = "Development/Revision: %s%s" \
% (revision, tag)
ZopeVersionUtility = ZopeVersion()

# ==== /src/sleekapps/graphql/threads/schemas/post.py  (repo: adepeter/sleekforum) ====
from ..queries.post import PostQuery
from ..mutations.post import PostMutation
class PostQuerySchema(PostQuery):
pass
class PostMutationSchema(PostMutation):
pass

# ==== /codes/personal_backend/tuoen/abs/middleware/data/base.py  (repo: MaseraTiGo/4U) ====
# coding=UTF-8
import math
import time
import datetime
import xlrd
from xlrd import xldate_as_tuple
import threading
from tuoen.sys.utils.cache.redis import redis
from tuoen.sys.core.exception.business_error import BusinessError
from tuoen.sys.utils.common.dictwrapper import DictWrapper
from tuoen.sys.core.field.base import BaseField
from tuoen.sys.utils.common.split_page import Splitor
from model.store.model_import import ImportStatus
class ExcelDateTimeField(BaseField):
def exec_excel(self, value):
if not value:
            value = None
else:
date = xldate_as_tuple(value, 0)
value = datetime.datetime(*date)
return value
def exec_string(self, value, fmt):
return datetime.datetime.strptime(value, fmt)
def exec_successive(self, value):
return self.exec_string(value, '%Y%m%d%H%M%S')
def exec_standards(self, value):
return self.exec_string(value, '%Y-%m-%d %H:%M:%S')
def exec_standards_half(self, value):
return self.exec_string(value, '%Y-%m-%d')
def exec_half(self, value):
return self.exec_string(value, '%Y%m%d')
def exec_slash(self, value):
return self.exec_string(value, '%Y/%m/%d %H:%M')
def exec_slash_half(self, value):
return self.exec_string(value, '%Y/%m/%d')
def exec_point_half(self, value):
return self.exec_string(value, '%Y.%m.%d')
def parsing(self, value):
result = None
for helper in (self.exec_excel, self.exec_standards, self.exec_standards_half, self.exec_successive, \
self.exec_half, self.exec_slash, self.exec_slash_half, self.exec_point_half):
try:
result = helper(value)
break
except Exception as e:
pass
'''
if result is None:
raise Exception("excel datatime format error")
'''
return result
def formatting(self, value):
if not isinstance(value, datetime.datetime):
            raise TypeError("formatting() expects a datetime.datetime value")
        return value.strftime("%Y-%m-%d %H:%M:%S")
class ExcelDeletePointField(BaseField):
def parsing(self, value):
if not value:
return ""
return str(value).split('.')[0]
def formatting(self, value):
return str(value)
class ExcelMoneyField(BaseField):
def parsing(self, value):
if not value:
return 0
return int(float(value) * 100)
def formatting(self, value):
return str(round(value / 100, 2))
class BaseImport(object):
def get_object_byid(self, id):
object = self.get_exec_cls().get_byid(id)
if object is None:
raise BusinessError("该信息不存在")
return object
def update_object(self, object, **attr):
object.update(**attr)
return True
def get_exec_cls(self):
raise NotImplementedError('Please imporlement this interface in subclass')
def get_fields(self):
raise NotImplementedError('Please imporlement this interface in subclass')
def read(self, f):
raise NotImplementedError('Please imporlement this interface in subclass')
def search_qs(self, **search_info):
search_qs = self.get_exec_cls().query(**search_info)
if "create_time_start" in search_info:
search_qs = search_qs.filter(create_time__gte = search_info["create_time_start"])
if "create_time_end" in search_info:
search_qs = search_qs.filter(create_time__lt = search_info["create_time_end"])
search_qs = search_qs.order_by("-id")
return search_qs
def search(self, current_page, **search_info):
search_qs = self.search_qs(**search_info)
page = Splitor(current_page, search_qs)
page.is_converting = self.check_redis()
return page
def check_redis(self):
is_converting = False
redis_name = self.get_redis_name()
try:
redis.get(redis_name)
is_converting = True
except Exception as e:
pass
return is_converting
def get_convert_list(self, **search_info):
search_info.update({"status":"init"})
search_qs = self.search_qs(**search_info)
return search_qs
def exec_convet(self):
raise NotImplementedError('Please imporlement this interface in subclass')
def check(self, row_list):
fields = self.get_fields()
row_infos = DictWrapper({})
error_infos = {}
for index, field in enumerate(fields):
cell = row_list[index]
if isinstance(cell, str):
cell = cell.strip()
key, helper = fields[index]
try:
row_infos.update({key: helper.parse(cell)})
except Exception as e:
error_infos.update({key: [helper, index]})
error_msg = ""
if error_infos:
error_info_list = [ helper.get_desc() \
for helper, _ in error_infos.values()]
error_msg = ', '.join(error_info_list)
return row_infos, error_msg
def store(self, data_infos):
obj = self.get_exec_cls()()
for key, value in data_infos.items():
setattr(obj, key, value)
return obj
# return self.get_exec_cls().create(**data_infos)
def get_queue(self, queue_len, size = 100):
cycle = int(math.ceil(queue_len / size))
for index in range(cycle):
yield index * size, (index + 1) * size
def run(self, f):
import_list, error_list = self.read(f)
if error_list:
# print('check error ', error_list)
return [], error_list
obj_list = []
queue_len = len(import_list)
for start, end in self.get_queue(queue_len):
store_list = []
for import_data in import_list[start:end]:
store = self.store(import_data)
store_list.append(store)
cur_obj_list = self.get_exec_cls()\
.objects.bulk_create(store_list)
obj_list.extend(cur_obj_list)
return obj_list, error_list
def convert_prepare(self, convert_list):
return convert_list, []
def convert(self, **search_info):
t = time.time()
redis_name = self.get_redis_name()
is_redis_exist = False
try:
redis.get(redis_name)
is_redis_exist = True
# redis.delete(redis_name)
except Exception as e:
redis.set(redis_name, 1)
test_thread = threading.Thread(target = self.convert_in, args = (redis_name,), kwargs = search_info)
test_thread.start()
if is_redis_exist:
raise BusinessError("该任务已存在请不要重复转化")
print(time.time() - t)
print("----------------------------->baseconvert done")
return [], []
def convert_after(self, success_list, failed_list):
return success_list, failed_list
def convert_in(self, redis_name, **search_info):
convert_list = self.get_convert_list(**search_info)
if len(convert_list) == 0:
redis.delete(redis_name)
return [], []
exec_list, fail_list = self.convert_prepare(convert_list)
success_list, failed_list = [], fail_list
over_time = 60 * 60 * 2
first_time = time.time()
for index, convert_obj in enumerate(exec_list):
now_time = time.time()
if now_time - first_time > over_time:
break
convert_obj.update(status = ImportStatus.EXCUTTING)
try:
is_success, error_text = self.exec_convet(convert_obj)
except Exception as e:
print("--------------------------->exception", e)
convert_obj.update(status = ImportStatus.FAILED, error_text = "系统异常,请联系管理员")
failed_list.append(convert_obj)
redis.delete(redis_name)
else:
if is_success:
convert_obj.update(status = ImportStatus.FINISH)
success_list.append(convert_obj)
else:
convert_obj.update(status = ImportStatus.FAILED, error_text = error_text)
failed_list.append(convert_obj)
print("convert items ---> ", index + 1)
redis.delete(redis_name)
class ExcelImport(BaseImport):
def read(self, f):
if type(f) == str:
workbook = xlrd.open_workbook(f)
else:
workbook = xlrd.open_workbook(file_contents = f)
sheet_names = workbook.sheet_names()
fields = self.get_fields()
data_list, error_list = [], []
for sheet_name in sheet_names:
sheet = workbook.sheet_by_name(sheet_name)
print("====", sheet.ncols, len(fields))
if sheet.ncols != len(fields):
error = '表结构错误'
error_list.append(error)
break
for row_index in range(1, sheet.nrows, 1):
row = sheet.row_values(row_index)
row_infos, error_msg = self.check(row)
if error_msg:
error = "[ {row}row ]: {error} format error"\
.format(row = row_index, error = error_msg)
error_list.append(error)
else:
data_list.append(row_infos)
return data_list, error_list

# ==== /atcoder/abc170_d.py  (repo: Hironobu-Kawaguchi/atcoder) ====
# https://atcoder.jp/contests/abc170/tasks/abc170_d
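# The sieve below marks, for every value x in A (ascending), all multiples
# of x; a value that appears twice is force-marked as well. An element
# counts toward the answer iff cnt[x] == 1, i.e. x is unique in A and no
# other element of A divides it.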
import numpy as np
import sys
input = sys.stdin.buffer.readline
N = int(input())
A = list(map(int, (input().split())))
A.sort()
cnt = np.zeros(10**6+10, dtype=np.int32)
for x in A:
if cnt[x] != 0:
cnt[x] = 2
continue
cnt[x::x] += 1
ans = 0
for x in A:
if cnt[x] == 1:
ans += 1
print(ans)

# ==== /torch/fx/experimental/symbolic_shapes.py  (repo: malfet/pytorch) ====
import torch
from typing import Set, Dict, List, Type, Optional, cast, Union
import sys
import itertools
import operator
import math
import functools
import threading
from contextlib import contextmanager
from functools import lru_cache
import traceback
import collections
import textwrap
import logging
# NB: The sym_* functions are used via getattr() and must be imported here.
from torch import SymInt, SymFloat, SymBool, sym_not, sym_float, sym_int, sym_max, sym_min # noqa: F401
from torch._guards import ShapeGuard, Source
SymTypes = (SymInt, SymFloat, SymBool)
log = logging.getLogger(__name__)
class GuardOnDataDependentSymNode(RuntimeError):
pass
try:
import sympy # type: ignore[import]
from sympy.printing.precedence import precedence # type: ignore[import] # noqa: F401
from sympy.printing.str import StrPrinter # type: ignore[import]
HAS_SYMPY = True
except ImportError:
HAS_SYMPY = False
aten = torch._ops.ops.aten # type: ignore[has-type]
__all__ = [
"has_symbolic_sizes_strides", "create_contiguous", "ShapeEnv",
"SymDispatchMode", "FloorDiv", "guard_int", "wrap_node",
]
SYM_FUNCTION_MODE = None
# We don't bother with the metaclass as all of the dispatching logic happens
# entirely from Python
#
# Didn't bother with ancestors for now, unlikely to have multiple modes for
# symints right now
# SymDispatchMode gets invoked whenever an operation is processed on
# a PySymInt. When this occurs, you get called at __sym_dispatch__
# with the operation in question. This is symmetric to TorchDispatchMode
# but with some caveats:
#
# - In TorchDispatchMode, you get the same arguments as what a user
# invoked your API with; e.g., if you call torch.ops.aten.foo(a, b),
# you get (a, b) as args to your call. In SymDispatchMode, if
# you call a + b (where a and b are SymInts), you will get
# (a.node, b.node) as your args (these are PySymInts)
#
# - SymInt/PySymInt don't have FX proxy support (unlike, e.g., Tensor).
# So you have to manually call Tracer/create_node to write into
# the graph. See ProxySymDispatchMode for an example
#
class SymDispatchMode:
def __sym_dispatch__(self, func, types, args, kwargs):
raise NotImplementedError()
def __enter__(self):
global SYM_FUNCTION_MODE
old = SYM_FUNCTION_MODE
if hasattr(self, "inner"):
raise RuntimeError(f"{self} has already been used as a mode. Please use a fresh version")
else:
self.inner = old
SYM_FUNCTION_MODE = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
global SYM_FUNCTION_MODE
SYM_FUNCTION_MODE = self.inner
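# A minimal illustration (editor's sketch, not part of the original file): a
# pass-through mode that logs each symbolic op before re-dispatching it.
# Inside __sym_dispatch__ the current mode has already been popped, so
# re-invoking the op does not recurse into this mode; the args arrive as
# SymNodes and are re-wrapped before the call.
#
#   class LoggingSymDispatchMode(SymDispatchMode):
#       def __sym_dispatch__(self, func, types, args, kwargs):
#           log.debug("sym dispatch: %s%r", getattr(func, "__name__", func), args)
#           wrapped = [wrap_node(a) if isinstance(a, SymNode) else a for a in args]
#           return func(*wrapped, **kwargs)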
def has_symbolic_sizes_strides(elem):
return elem._has_symbolic_sizes_strides
def create_contiguous(shape):
strides = [1]
for dim in reversed(shape[:-1]):
strides.append(dim * strides[-1])
return list(reversed(strides))
def _handle_sym_dispatch(func, args, kwargs):
global SYM_FUNCTION_MODE
mode = SYM_FUNCTION_MODE
assert mode
SYM_FUNCTION_MODE = mode.inner
try:
# TODO: properly compute types
types: List[Type] = []
return mode.__sym_dispatch__(func, types, args, kwargs)
finally:
SYM_FUNCTION_MODE = mode
def guard_int(a):
if isinstance(a, SymInt):
return a.node.guard_int("", 0) # NB: uses Python backtrace
assert type(a) is int
return a
# Drop in replacement for math.sqrt
def sym_sqrt(a):
if hasattr(a, '__sym_sqrt__'):
return a.__sym_sqrt__()
return math.sqrt(a)
def to_node(self, num):
if isinstance(num, SymTypes):
return num.node
elif type(num) is bool:
return self.wrap_bool(num)
elif type(num) is int:
return self.wrap_int(num)
elif type(num) is float:
return self.wrap_float(num)
else:
# NotImplemented is important so that Python tries the
# other magic method
return NotImplemented
# Given a GraphModule, return all the FakeTensors for all the placeholders
def fx_placeholder_vals(gm):
return [n.meta['val'] for n in gm.graph.nodes if n.op == "placeholder"]
# Given a GraphModule and arguments to run it with, evaluate that the guards
# for its associated ShapeEnv are satisfied by the passed arguments. This
# WILL check for duck sizing.
def eval_guards(gm, *args):
return gm.shape_env.evaluate_guards_for_args(fx_placeholder_vals(gm), args)
def bind_symbols(gm, *args):
return gm.shape_env.bind_symbols(fx_placeholder_vals(gm), args)
# TODO: An incomplete list
# 1. Set variables to be equal when we do equality
# 2. Specialize on 0/1 when we do subtraction
class SymNode:
"""
This is a type erased SymInt/SymFloat which we use to do actual operations.
End users don't touch this. Magic methods are NOT defined on this object.
"""
def __init__(self, expr, shape_env, pytype, constant=None):
self._expr = expr
self.shape_env = shape_env
self.pytype = pytype
self.constant = constant
@property
def expr(self):
self._update_expr()
return self._expr
def _update_expr(self):
self._expr = self.shape_env.replace(self._expr)
def is_int(self):
return self.pytype is int
def is_float(self):
return self.pytype is float
def is_bool(self):
return self.pytype is bool
def wrap_int(self, num):
assert type(num) is int
return SymNode(sympy.Integer(num), self.shape_env, int, constant=num)
def wrap_float(self, num):
assert type(num) is float
return SymNode(sympy.Float(num), self.shape_env, float, constant=num)
def wrap_bool(self, num):
assert type(num) is bool
return SymNode(sympy.true if num else sympy.false, self.shape_env, bool, constant=num)
def clone(self):
return self
def str(self):
return f"{self.expr}"
def __str__(self):
return self.str()
def __repr__(self):
return self.str()
# These methods are metaprogrammed in below
def sym_int(self) -> "SymNode": # noqa: F811
raise AssertionError("should have been overridden")
def sym_float(self) -> "SymNode": # noqa: F811
raise AssertionError("should have been overridden")
def or_(self, other) -> "SymNode": # noqa: F811
raise AssertionError("should have been overridden")
def and_(self, other) -> "SymNode": # noqa: F811
raise AssertionError("should have been overridden")
# Make C++ happy
def sym_or(self, other):
return self.or_(other)
def sym_and(self, other):
return self.and_(other)
# Today we error on calling int on a symbolic shape, as this is a very accessible footgun.
def int_(self):
if len(self.expr.free_symbols) == 0:
return int(self.expr)
raise RuntimeError(f"Trying to extract a concrete int out of a symbolic int {self.expr}")
# You can manually trigger a guard with this function
def guard_int(self, file, line):
# TODO: use the file/line for some useful diagnostic on why a
# guard occurred
r = self.shape_env.evaluate_expr(self.expr)
try:
return int(r)
except Exception:
log.warn(f"Failed to convert to int: {r}")
raise
def guard_float(self, file, line):
# TODO: use the file/line for some useful diagnostic on why a
# guard occurred
r = self.shape_env.evaluate_expr(self.expr)
try:
return float(r)
except Exception:
log.warn(f"Failed to convert to float: {r}")
raise
def guard_bool(self, file, line):
# TODO: use the file/line for some useful diagnostic on why a
# guard occurred
# TODO: why is the replace needed here?
r = self.shape_env.evaluate_expr(self.shape_env.replace(self.expr))
try:
return bool(r)
except Exception:
log.warn(f"Failed to convert to bool: {r}")
raise
def bool_(self):
return self.guard_bool("", 0)
if HAS_SYMPY:
class FloorDiv(sympy.Function):
"""
We maintain this so that:
1. We can use divisibility guards to simplify FloorDiv(a, b) to a / b.
2. Printing out the expression is nicer (compared to say, representing a//b as (a - a % b) / b)
"""
nargs = (2,)
precedence = 50 # precedence of mul # noqa: F811
def _sympystr(self, printer):
lhs = self.args[0]
rhs = self.args[1]
lhs_str = printer.parenthesize(lhs, self.precedence)
rhs_str = printer.parenthesize(rhs, self.precedence)
return f"{lhs_str}//{rhs_str}"
@classmethod
def eval(cls, base, divisor):
if base == 0:
return sympy.Integer(0)
if divisor == 1:
return base
if isinstance(base, sympy.Integer) and isinstance(divisor, sympy.Integer):
return base // divisor
if isinstance(base, FloorDiv):
return FloorDiv(base.args[0], base.args[1] * divisor)
if isinstance(base, sympy.Add):
for a in base.args:
gcd = sympy.gcd(a, divisor)
if gcd == divisor:
return FloorDiv(base - a, divisor) + a / gcd
gcd = sympy.gcd(base, divisor)
if gcd != 1:
return FloorDiv(
sympy.simplify(base / gcd), sympy.simplify(divisor / gcd)
)
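    # For example (editor's note), with a = sympy.Symbol("a"):
    #   FloorDiv(4 * a, 2) reduces to 2*a through the gcd branch above, and
    #   FloorDiv(FloorDiv(a, 2), 3) collapses to FloorDiv(a, 6).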
class IsNonOverlappingAndDenseIndicator(sympy.Function):
is_integer = True
@classmethod
def eval(cls, *args):
assert len(args) % 2 == 0
if all(isinstance(a, sympy.Integer) for a in args):
dim = len(args) // 2
sizes = args[0:dim]
strides = args[dim:]
return int(eval_is_non_overlapping_and_dense(
[int(s) for s in sizes],
[int(s) for s in strides]
))
return None
@lru_cache(256)
def safe_expand(r):
if hasattr(r, 'expand'):
return sympy.expand(r)
else:
return r
# Methods that have a `__foo__` as well as `__rfoo__`
reflectable_magic_methods = {
'add': lambda a, b: a + b,
'sub': lambda a, b: a - b,
'mul': lambda a, b: a * b,
'mod': lambda a, b: a % b,
'pow': lambda a, b: a ** b,
'and': lambda a, b: a & b,
'or': lambda a, b: a | b,
'truediv': lambda a, b: a / b,
'floordiv': lambda a, b: FloorDiv(a, b),
}
magic_methods = {
**reflectable_magic_methods,
'sym_not': lambda a: ~a,
'eq': lambda a, b: sympy.Eq(a, b),
'ne': lambda a, b: sympy.Ne(a, b),
'gt': lambda a, b: sympy.Gt(a, b),
'lt': lambda a, b: sympy.Lt(a, b),
'le': lambda a, b: sympy.Le(a, b),
'ge': lambda a, b: sympy.Ge(a, b),
'floor': lambda a: sympy.floor(a),
'sym_float': lambda a: a, # Cannot use sympy.Float(a) here, coz it expects python literals
'ceil': lambda a: sympy.ceiling(a),
'neg': lambda a: -a,
'sym_min': lambda a, b: sympy.Min(a, b),
'sym_max': lambda a, b: sympy.Max(a, b),
'sym_sqrt': lambda a: sympy.sqrt(a),
}
sizes_strides_methods = {
'is_non_overlapping_and_dense': lambda *args: IsNonOverlappingAndDenseIndicator(*args),
}
# TODO: Deduplicate this with torch/_prims_common/__init__.py
def eval_is_non_overlapping_and_dense(sizes, strides):
dim = len(sizes)
# Short-circuits for tensors of rank one, which are
# non-overlapping and "dense" if their stride is one
# or it is a 0/1 element tensor
if dim == 1:
return strides[0] == 1 or sizes[0] < 2
# Checks that there exists a permutation of the strides s.t. the tensor would be contiguous
# Sorts (length, stride) pairs by stride
lengths_and_strides = sorted(
tuple(zip(sizes, strides)), key=operator.itemgetter(1)
)
# Unlike the C++ code, we don't move the 0/1 size dimensions to the
# end. So we have to keep going for this code.
expected_stride = 1
for length, stride in lengths_and_strides:
if length == 1:
continue
if stride != expected_stride:
return False
expected_stride *= length
return True
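# For example (editor's note): eval_is_non_overlapping_and_dense([2, 3], [3, 1])
# is True (a contiguous 2x3 layout), while eval_is_non_overlapping_and_dense(
# [2, 3], [1, 1]) is False, because after the size-3 dimension the next stride
# would have to be 3 to avoid overlap.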
def is_non_overlapping_and_dense(sizes, strides):
base = None
for s in itertools.chain(sizes, strides):
if isinstance(s, SymInt):
base = s
break
assert base is not None
return wrap_node(base.node.is_non_overlapping_and_dense(
[to_node(base.node, s) for s in sizes],
[to_node(base.node, s) for s in strides],
))
unary_magic_methods = {
'sym_float',
'ceil',
'floor',
'neg',
'sym_sqrt',
'sym_not',
}
bool_magic_methods = {"and", "or", "sym_not"}
magic_methods_on_math = {"ceil", "floor"}
magic_methods_on_submodule = {"sym_float", "sym_sqrt", "sym_min", "sym_max", "sym_not"}
magic_methods_on_operator_with_trailing_underscore = {"and", "or"}
always_float_magic_methods = {"truediv", "sym_float", "sym_sqrt"}
always_int_magic_methods = {"ceil", "floor"}
always_bool_magic_methods = {"eq", "ne", "gt", "lt", "le", "ge", "and", "or", "sym_not", "is_non_overlapping_and_dense"}
def wrap_node(x):
# TODO: let C++ also take advantage of this
if isinstance(x, SymNode) and x.constant is not None:
return x.constant
if x.is_int():
return SymInt(x)
elif x.is_float():
return SymFloat(x)
elif x.is_bool():
return SymBool(x)
else:
raise AssertionError(f"unrecognized return type {x}")
def _make_node_magic(method, func):
func = lru_cache(256)(func)
if method in magic_methods_on_operator_with_trailing_underscore:
method_attr = f"{method}_"
else:
method_attr = method
def binary_magic_impl(self, other):
if method in magic_methods_on_submodule:
op = getattr(sys.modules[__name__], method_attr)
else:
assert method not in magic_methods_on_math
op = getattr(operator, method_attr)
if SYM_FUNCTION_MODE:
r = _handle_sym_dispatch(op, (wrap_node(self), wrap_node(other)), {})
assert isinstance(r, SymTypes), type(r)
return r.node
assert isinstance(other, SymNode)
other_expr = other.expr
# TODO: consider constant prop here
expr = self.shape_env.replace(self.expr)
other_expr = self.shape_env.replace(other_expr)
try:
out = func(expr, other_expr)
except Exception:
log.warning(f"failed to eval {method}({expr}, {other_expr})")
raise
out = safe_expand(out)
pytype: Type
if method in always_float_magic_methods:
pytype = float
elif method in always_bool_magic_methods:
pytype = bool
else:
pytype = self.pytype
return SymNode(out, self.shape_env, pytype)
def unary_magic_impl(self):
if SYM_FUNCTION_MODE:
if method in magic_methods_on_math:
op = getattr(math, method_attr)
elif method in magic_methods_on_submodule:
op = getattr(sys.modules[__name__], method_attr)
else:
op = getattr(operator, method_attr)
r = _handle_sym_dispatch(op, (wrap_node(self),), {})
assert isinstance(r, SymTypes), type(r)
return r.node
# TODO: consider constant prop here
expr = self.shape_env.replace(self.expr)
try:
out = func(expr)
except Exception:
log.warning(f"failed to eval {method}({expr})")
raise
out = safe_expand(out)
pytype: Type
if method in always_int_magic_methods:
pytype = int
elif method in always_float_magic_methods:
pytype = float
else:
pytype = self.pytype
return SymNode(out, self.shape_env, pytype)
if method in unary_magic_methods:
setattr(SymNode, method_attr, unary_magic_impl)
else:
setattr(SymNode, method_attr, binary_magic_impl)
def _make_node_sizes_strides(method, func):
# NB: don't LRU cache, lots of arguments
def sizes_strides_impl(self, sizes, strides):
op = getattr(sys.modules[__name__], method)
if SYM_FUNCTION_MODE:
r = _handle_sym_dispatch(op, ([wrap_node(s) for s in sizes], [wrap_node(s) for s in strides]), {})
assert isinstance(r, SymBool), type(r)
return r.node
size_exprs = [s.expr for s in sizes]
stride_exprs = [s.expr for s in strides]
try:
out = func(*size_exprs, *stride_exprs)
except Exception:
log.warning(f"failed to eval {method}(*{size_exprs}, *{stride_exprs})")
raise
# bool is never expandable
return SymNode(sympy.Eq(out, 1), self.shape_env, bool)
setattr(SymNode, method, sizes_strides_impl)
for method, func in magic_methods.items():
_make_node_magic(method, func)
for method, func in sizes_strides_methods.items():
_make_node_sizes_strides(method, func)
def _make_user_magic(method, user_type):
# User magic takes care of wrapping the other operand into a node,
# so that our internal logic can assume everything is nodes
if method in magic_methods_on_operator_with_trailing_underscore:
method_attr = f"{method}_"
else:
method_attr = method
def unary_magic_impl(self):
return wrap_node(getattr(self.node, method_attr)())
def binary_magic_impl(self, other):
other_node = to_node(self.node, other)
if other_node is NotImplemented:
return NotImplemented
return wrap_node(getattr(self.node, method_attr)(other_node))
def rbinary_magic_impl(self, other):
other_node = to_node(self.node, other)
if other_node is NotImplemented:
return NotImplemented
return wrap_node(getattr(other_node, method_attr)(self.node))
if method in unary_magic_methods:
setattr(user_type, f"__{method}__", unary_magic_impl)
else:
setattr(user_type, f"__{method}__", binary_magic_impl)
if method in reflectable_magic_methods:
setattr(user_type, f"__r{method}__", rbinary_magic_impl)
for method, func in magic_methods.items():
if method in bool_magic_methods:
_make_user_magic(method, SymBool)
else:
_make_user_magic(method, SymInt)
_make_user_magic(method, SymFloat)
del method
del func
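# After the wiring above, ordinary Python operators on SymInt/SymFloat/SymBool
# dispatch through SymNode. A minimal sketch (illustrative; `src` stands in
# for a real torch._dynamo Source object):
#
#   shape_env = ShapeEnv()
#   s0 = shape_env.create_symintnode(shape_env.create_symbol(4, src))
#   s0 + 1    # SymInt.__add__ -> SymNode.add -> sympy addition
#   2 * s0    # reflected __rmul__ installed from reflectable_magic_methods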
def _lru_cache(fn, maxsize=None):
"""
Wrapper around lru_cache that clears when new info about shapes has been
updated.
Use lru_cache if the output is always the same, regardless of the
constraints we know now (i.e. evaluate_expr)
Use _lru_cache otherwise.
"""
fn_cache = lru_cache(maxsize)(fn)
prior_key = None
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
nonlocal prior_key
if prior_key != self._get_key():
prior_key = self._get_key()
fn_cache.cache_clear()
return fn_cache(self, *args, **kwargs)
wrapper.cache_info = fn_cache.cache_info # type: ignore[attr-defined]
return wrapper
if HAS_SYMPY:
# This stub exists so we can easily add metadata to sympy symbols
# NB: This inherits from Dummy, not Symbol, because Symbols with the same
# name get interned. This is bad for us as we want the metadata
# to vary across different invocations and not leak.
class Symbol(sympy.Dummy):
__slots__: List[str] = ['sources', 'stack']
sources: List[Source]
stack: Optional[str]
def __new__(cls, *args, **kwargs):
self = super().__new__(cls, *args, **kwargs)
self.sources = []
self.stack = None
return self
class ShapeGuardPrinter(StrPrinter):
def __init__(
self,
symbol_to_source,
source_ref,
):
super().__init__()
self.symbol_to_source = symbol_to_source
self.source_ref = source_ref
def _print_Symbol(self, expr) -> str:
assert isinstance(expr, Symbol), str(type(expr))
assert expr in self.symbol_to_source, (
f"{expr} (could be from {[s.name() for s in expr.sources]}) "
f"not in {self.symbol_to_source}"
)
return self.source_ref(self.symbol_to_source[expr][0])
class ShapeEnv(object):
def __init__(self):
self.guards: List[ShapeGuard] = []
# Maps symbolic ints to their original concrete values
# Currently populated from tensors
self.var_to_val: Dict["sympy.Symbol", "sympy.Integer"] = {}
# Maps from sympy ints to expressions representing them
# Populated from equality guards (i.e. a.shape[0] == b.shape[0])
        self.replacements: Dict["sympy.Symbol", "sympy.Expr"] = {}
# Set holds a % b expressions that evaluate to 0.
self.divisible: Set["sympy.Expr"] = set()
# Duck-shaping says that if two input tensors have the same size,
# they get assigned the same symbolic variable
self.val_to_var: Dict[int, "sympy.Expr"] = {0: sympy.Integer(0), 1: sympy.Integer(1)}
self.tls = threading.local()
self.unbacked_symfloat_counter = itertools.count()
self.unbacked_symint_counter = itertools.count()
def _suppress_guards_tls(self):
return getattr(self.tls, "suppress_guards", False)
@contextmanager
def suppress_guards(self):
self.tls.suppress_guards = True
try:
yield
finally:
self.tls.suppress_guards = False
def _get_key(self):
"""
Defines the current "state" of the guards we've accumulated in this ShapeEnv.
Determines when we need to invalidate our cache
"""
return (len(self.replacements), len(self.divisible))
def create_symbolic_sizes_strides_storage_offset(self, ex: torch.Tensor, source: Source):
"""
Returns a list of symbolic sizes and strides for the given tensor.
We try our best to express stride in terms of the sizes, so as to not
introduce new symbolic variables.
"""
from torch._dynamo.source import TensorPropertySource, TensorProperty
size = [
self.create_symbol(
val, TensorPropertySource(source, TensorProperty.SIZE, i)
) for i, val in enumerate(ex.size())
]
stride: List[Optional[sympy.Expr]] = [None] * len(size)
for i, val in enumerate(ex.stride()):
if val in (0, 1):
stride[i] = sympy.Integer(val)
while any(x is None for x in stride):
candidates = {
ex.size(i) * ex.stride()[i]: size[i] * stride[i]
for i in range(len(size))
if stride[i] is not None and ex.stride()[i] >= 0
}
# iterate over unbound strides in sorted order
val_list = sorted(
[(ex.stride()[i], i) for i in range(len(stride)) if stride[i] is None]
)
for _, i in val_list:
if stride[i] is None and ex.stride()[i] in candidates:
stride[i] = candidates[ex.stride()[i]]
candidates[ex.size(i) * ex.stride()[i]] = size[i] * stride[i]
if any(x is None for x in stride):
# bind the smallest unbound stride to a new variable
val, i = min(
[
(ex.stride()[i], i)
for i in range(len(stride))
if stride[i] is None
]
)
stride[i] = self.create_symbol(
val,
TensorPropertySource(source, TensorProperty.STRIDE, i)
)
assert all(x is not None for x in stride)
sym_size = [self.create_symintnode(i) for i in size]
sym_stride = []
for i, stride_expr in enumerate(stride):
# NB: Don't duck size the stride; instead use the expression
# we computed
assert stride_expr is not None
sym_stride.append(self.create_symintnode(stride_expr))
sym_storage_offset = self.create_symintnode(self.create_symbol(
ex.storage_offset(),
TensorPropertySource(source, TensorProperty.STORAGE_OFFSET)
))
return sym_size, sym_stride, sym_storage_offset
def create_symintnode(self, sym: "sympy.Expr"):
return SymInt(SymNode(sym, self, int))
def create_unbacked_symfloat(self):
symbol = Symbol(f"f{next(self.unbacked_symfloat_counter)}")
symbol.stack = ''.join(traceback.format_list(traceback.extract_stack()[:-1]))
return SymFloat(SymNode(symbol, self, float))
def create_unbacked_symint(self):
symbol = Symbol(f"i{next(self.unbacked_symint_counter)}", integer=True)
symbol.stack = ''.join(traceback.format_list(traceback.extract_stack()[:-1]))
return SymInt(SymNode(symbol, self, int))
    # This is guaranteed to return an expression that is a sympy.Symbol or
    # the negation of one, but there may be a replacement that allows it
    # to be immediately simplified
def create_symbol(self, val: int, source: Source) -> "sympy.Expr":
assert isinstance(source, Source), f"{type(source)} {source}"
if not HAS_SYMPY:
raise RuntimeError("Need sympy installed to create symbolic shapes")
if val < 0:
from torch._dynamo.source import NegateSource
return -self.create_symbol(-val, NegateSource(source))
# Now attempt to duck size this value
# TODO: Use site has to duck size
# TODO: Do this duck sizing lazily later
# Create a duck sized int if necessary
if val not in self.val_to_var:
sympy_expr = Symbol(f"s{len(self.var_to_val)}", positive=True, integer=True)
self.var_to_val[sympy_expr] = sympy.Integer(val)
self.val_to_var[val] = sympy_expr
# This implements duck-shaping: input sizes that match are assigned
# the same symint
r = self.duck_int(val)
if isinstance(r, Symbol):
r.sources.append(source)
return r
# Given a concrete integer value, return the duck sized symbol associated
# with it; e.g., suppose we already have a tensor of size 3 in scope,
# which was assigned s3, then shape_env.duck_int(3) we will get back s3.
# This has some pretty tricky preconditions associated with it, so if
# you are in a binding context, you probably wanted create_symbol instead.
def duck_int(self, val):
assert val in self.val_to_var, (
"Direct call to duck_int MUST only duck size an integer values "
"that have already produced by inputs (allocated "
"by create_symbol), or we risk being unable to instantiate the "
"symbolic variable later. However, at time of this call "
f"val={val} was not duck sized. Bound duck sized integers: "
f"{list(self.val_to_var.keys())}"
)
return self.val_to_var[val]
# Generates a Python string which, when evaluated in a context that
# defines tensors for all the sources, returns True or False depending
# on if the guards evaluated to True or not. Primarily used by Dynamo,
# but this is also helpful for manual testing of guards (see
# evaluate_guards_for_args)
def codegen_guards(self, placeholders, sources,
source_ref=lambda n: n.name()):
# It took a lot of sweat to figure out the algorithm here. Let's
# explain how it works.
#
# The ShapeEnv lifecycle looks something like this:
#
# - For each input, you either generate a fresh Sympy symbol (s0) to
# represent its value (a binding site), or you reuse some
# preexisting symbol or expression, skipping the symbol allocation
# (e.g., duck sizing to a preexisting symbol, or expressing a
# stride as a multiplication of a separate stride and size.)
# Naively, you might expect to bind a fresh Sympy symbol for
# every input, but this is fairly wasteful as most of these
# symbols immediately simplify away, and if you don't eagerly
# specialize, e.g., 0/1 symbols, you end up with very complicated
# expressions that are not optimizable in practice.
#
# - You perform some compute on these symbols, occasionally
# introducing guards on boolean expressions on these symbols.
# In particular, whenever we guard on equality (_maybe_guard_eq),
# we can simplify shapes; e.g., when s0 == s1 * 2, we can now
# replace all occurrences of s0 with s1 * 2. Sometimes, a
# boolean expression evaluation doesn't introduce a guard, as
# the guard is already entailed by the simplifications we have
# applied.
#
# - In the end, you have a bunch of replacements (saying how to
# simplify shapes) and a bunch of guards (all the equality guards
# are trivial, because they're covered by the replacements).
#
# From the ShapeEnv, we must generate a Python expression that, when
# evaluated on a set of inputs, tells us whether or not these boolean
# expressions would have evaluated in the same way. However,
# we cannot easily compute this, as we elide recording boolean
# expressions when we think they are vacuously true. Thus, we seek
        # an approximation: we must generate an expression that, if true, would have
# produced an "equivalent" ShapeEnv, which would answer guard
# expressions in the same way.
#
# Our notion of equivalence is a bit subtle. For example, consider
# the ShapeEnv created from an input of size (5, 4) versus (4, 4)
# (no other guards.) Duck sizing would generate (s0, s1) in the first
# case but (s0, s0) in the second. We do NOT assume that size
# variables are disjoint; so in fact a graph that assumes the input
# could be (s0, s1) subsumes (s0, s0) (setting s0 == s1), but not
# vice versa. However, consider an analogous case (1,) versus (2,).
# Duck sizing generates (1,) and (s0,); the (s0,) graph does NOT
        # subsume the (1,) graph because we assume that any size variable
# is NOT 0/1 (and make simplifications according to this; e.g., if
# we queried s0 == 0, we would immediately return False without
# returning a guard.)
#
# So, it is perhaps easier to flip things on their head: the guard
# expressions we generate here say what simplifications are valid,
# and what are not. Below, we explain each of the guard expressions
# we generate
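        # As a concrete (purely illustrative) example: for placeholders of
        # sizes (2, 3) and (3,) bound to sources t0 and t1, duck sizing
        # shares one symbol between t0.size()[1] and t1.size()[0], and the
        # generated expression looks roughly like
        #   "t1.size()[0] == t0.size()[1] and t0.size()[0] != 0 and
        #    t0.size()[0] != 1 and t0.size()[1] != 0 and t0.size()[1] != 1"
        # (stride and storage-offset guards elided).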
# TODO: Make this more efficient by binding all the size/stride/offsets
# to locals before performing tests on them.
from torch._dynamo.source import NegateSource, TensorPropertySource, TensorProperty
# Actual codegen must be delayed as we don't necessarily know what
# the symbol mapping is
input_guards = []
symbol_to_source = collections.defaultdict(list)
# How do we know what the value of s0 is? Fresh variables can only be
# bound by inputs, so there MUST be some other input which binds the
# variable. If there is no such input, this is an error in our
# system. We record where all symbols come from, to help you diagnose
# why those symbols didn't occur.
#
# In fact, generally speaking it is only possible for the "outermost"
# user of a ShapeEnv to evaluate the guards, because some inputs may
# not be available to inner levels. For example, Dynamo can guard on
# tensors that never actually become graph arguments (they are
# pruned). In this case, only Dynamo knows about these arguments.
def track_symint(source, val):
if isinstance(val, SymInt):
s = val.node.expr
if isinstance(s, sympy.Symbol):
symbol_to_source[s].append(source)
elif isinstance(-s, sympy.Symbol):
symbol_to_source[-s].append(NegateSource(source))
input_guards.append((source, s))
else:
input_guards.append((source, sympy.Integer(val)))
for t, source in zip(placeholders, sources):
assert isinstance(source, Source)
if t is None:
continue
if isinstance(t, SymInt):
track_symint(source, t)
continue
assert isinstance(t, torch.Tensor)
for i, s in enumerate(t.size()):
track_symint(TensorPropertySource(source, TensorProperty.SIZE, i), s)
for i, s in enumerate(t.stride()):
track_symint(TensorPropertySource(source, TensorProperty.STRIDE, i), s)
track_symint(TensorPropertySource(source, TensorProperty.STORAGE_OFFSET), t.storage_offset())
# 1. Every input must equal the final simplified symbolic expression
# stored on the placeholder. Given a placeholder (s0*2, s1),
# if we have an input (2, 3), we must show s0*2 == 2 and s1 == 3.
# This does a lot of work: it covers duck sizing and equality guards.
exprs = []
for source, expr in input_guards:
# Small optimization
if (
isinstance(expr, Symbol) and
expr in symbol_to_source and
source == symbol_to_source[expr][0]
):
continue
sexpr = ShapeGuardPrinter(symbol_to_source, source_ref).doprint(expr)
exprs.append(f"{source_ref(source)} == {sexpr}")
# 2. Every guard must evaluate to True (but remember many guards
        # like s0 == s1*2 become trivial due to simplification)
for g, tb in self.guards:
if self._maybe_evaluate_static(g) is not None:
continue
g = self.simplify(g)
try:
exprs.append(ShapeGuardPrinter(symbol_to_source, source_ref).doprint(g))
except Exception:
log.warning(f"Failing guard allocated at: \n{tb}")
raise
# 3. Every symbol must not be equal to 0/1
for sources in symbol_to_source.values():
assert sources
# We must assert that each symbol is not zero or one, as we make
# negative inferences on shape variables
exprs.append(f"{source_ref(sources[0])} != 0 and {source_ref(sources[0])} != 1")
if exprs:
return " and ".join(exprs)
else:
return "True"
def evaluate_guards_for_args(self, placeholders, args):
from torch._dynamo.source import GlobalSource
arg_names = [f"t{i}" for i in range(len(args))]
code = self.codegen_guards(placeholders, [GlobalSource(a) for a in arg_names])
return eval(code, {}, dict(zip(arg_names, args)))
def bind_symbols(self, placeholders, args):
# Given a paired list of placeholders (fake tensors with
# symbolic sizes) and concrete arguments (regular tensors
# with real sizes), returns a dictionary mapping each
# symbol to its real value. So for example, if you
# have a placeholder with size (s0, s1), binding
# (2, 4) to it will give you {s0: 2, s1: 4}. This is
# not guaranteed to bind ALL symbols in the ShapeEnv;
# we can't bind a symbol if it doesn't occur in any placeholder,
# and symbols that already have replacements won't get bindings.
# This is a little duplicative with evaluate_guards but
# it's different enough that it seemed cleanest to make
# another copy. This assumes the guards are already checked,
# though if it's cheap we'll check for shenanigans
bindings: Dict[sympy.Symbol, int] = {}
def bind_symint(arg, val):
if isinstance(val, SymInt):
s = val.node.expr
if isinstance(s, sympy.Symbol):
if s in bindings:
assert bindings[s] == arg, f"{bindings[s]} != {arg}"
else:
bindings[s] = arg
elif isinstance(-s, sympy.Symbol):
if -s in bindings:
assert bindings[-s] == -arg, f"{bindings[-s]} != {-arg}"
else:
bindings[-s] = -arg
for t, arg in zip(placeholders, args):
if t is None:
continue
if isinstance(t, SymInt):
bind_symint(arg, t)
continue
assert isinstance(t, torch.Tensor)
for i, s in enumerate(t.size()):
bind_symint(arg.size(i), s)
for i, s in enumerate(t.stride()):
bind_symint(arg.stride(i), s)
bind_symint(arg.storage_offset(), t.storage_offset())
return bindings
def get_nontrivial_guards(self):
return [self.simplify(guard.expr) for guard in self.guards if self._maybe_evaluate_static(guard.expr) is None]
def format_guards(self, verbose=False):
def format_tb(tb):
if not verbose:
return ""
return f"\n Guarded at:\n{textwrap.indent(tb, ' ')}"
return '\n'.join(f" - {guard.expr}{format_tb(guard.stack)}" for guard in self.guards)
def get_shape_groups(self):
shape_groups = collections.defaultdict(list)
for k, v in self.replacements.items():
shape_groups[v].append(k)
return shape_groups
@_lru_cache
def _maybe_evaluate_static(self, expr: "sympy.Expr") -> "Optional[sympy.Expr]":
"""
Tries to evaluate expr without introducing guards
"""
expr = self.simplify(expr)
# Simplifies assuming that shape vars > 1 (since we cache on 0/1 shape values)
symbols = list(expr.free_symbols)
new_shape_env = {
k: sympy.Symbol(f"shape_{idx}", positive=True, integer=True) + 1
for idx, k in enumerate(symbols)
# Do not assume unbacked symints are > 1
if k in self.var_to_val
}
new_expr = expr.xreplace(new_shape_env)
floor_div_replace = {}
for atom in new_expr.atoms(FloorDiv):
floor_div_replace[atom] = sympy.floor(atom.args[0] / atom.args[1])
new_expr = safe_expand(new_expr.xreplace(floor_div_replace))
if len(list(new_expr.free_symbols)) == 0:
return new_expr
return None
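    # Illustrative: for a size symbol s0 backed by a real tensor, an
    # expression like sympy.Gt(s0, 1) becomes Gt(shape_0 + 1, 1) with
    # shape_0 positive, which sympy folds to True, so it is returned
    # without ever guarding; sympy.Eq(s0, 5) keeps a free symbol and
    # returns None.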
@_lru_cache
def replace(self, expr: "sympy.Expr") -> "sympy.Expr":
replacements = {s: self._find(cast(sympy.Symbol, s)) for s in expr.free_symbols}
return safe_expand(expr.xreplace(replacements))
@_lru_cache
def _update_divisible(self):
new_divisible = set()
for k in self.divisible:
res = self.replace(k)
if len(res.free_symbols) > 0:
new_divisible.add(k)
self.divisible = new_divisible
@_lru_cache
def simplify(self, expr: "sympy.Expr") -> "sympy.Expr":
expr = self.replace(expr)
if expr.has(FloorDiv):
self._update_divisible()
div_replacements = {}
for atom in expr.atoms(FloorDiv):
base, divisor = atom.args
if self.replace(base % divisor) in self.divisible:
div_replacements[atom] = base / divisor
expr = expr.xreplace(div_replacements)
expr = safe_expand(expr)
return expr
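    # Illustrative: once `a % 2` has been recorded in self.divisible,
    # simplify(FloorDiv(a, 2) * 2) rewrites FloorDiv(a, 2) to a / 2 and
    # safe_expand reduces the whole expression to a.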
@lru_cache(256)
def size_hint(self, expr: "sympy.Expr"):
"""
Gets a size hint for a given expression from the underlying shapes we had.
Does not introduce a guard, so only use this when you can guarantee that
your code is still valid for arbitrary shapes (such as optimization decisions)
"""
result_expr = safe_expand(expr).xreplace(self.var_to_val)
if len(result_expr.free_symbols) != 0:
raise self._make_data_dependent_error(result_expr)
return result_expr
def _make_data_dependent_error(self, expr):
# TODO: in a Dynamo context, having user code, and having the
# name of the local, will be much better
accesses = '\n\n'.join(
f"Data dependent variable '{s}' allocated at:\n{s.stack}"
for s in expr.free_symbols
)
return GuardOnDataDependentSymNode(
f"\n\n{accesses}\n"
"GuardOnDataDependentSymNode: It appears that you're trying to get "
"a value out of symbolic int/float "
"whose value is data-dependent (and thus we do not know the true value.) "
f"The expression we were trying to evaluate is {expr}. "
"Scroll up to see where each of these data-dependent accesses originally occurred."
# TODO: Help text about how to use our runtime tests to fix this
# problem
)
@_lru_cache
def _find(self, a: "sympy.Symbol") -> "sympy.Expr":
"""
Implements a DSU-like algorithm to find the variable that represents a
Also handles transitive non-identity replacements.
a: b + c
c: d
"""
if a not in self.replacements:
return a
res = self.replacements[a]
cur_replace = {s: self._find(s) for s in res.free_symbols}
self.replacements[a] = self.replacements[a].xreplace(cur_replace)
return self.replacements[a]
@lru_cache(256)
def _maybe_guard_eq(self, expr: Union["sympy.Eq", "sympy.Ne"]) -> None:
"""
Evaluates the result of an eq call. If true, uses information to
simplify shapes (i.e. a == b or a % 5 == 0)
"""
concrete_bool = bool(self.size_hint(expr))
if isinstance(expr, sympy.Eq):
if not concrete_bool:
return
# NB: Apparently this is load bearing; to see what test fails if
# you comment it out run:
# python test/functorch/test_aotdispatch.py -k
# test_aot_autograd_symbolic_module_exhaustive_nn_LazyConv3d_cpu_float32
elif isinstance(expr, sympy.Ne):
if concrete_bool:
return
free = list(expr.free_symbols)
assert len(free) > 0, "The expression should not be static by this point"
# In case of really gnarly expression, we don't blow up
if len(free) > 5:
return
free = sorted(free, key=lambda x: (self.size_hint(x), x.name), reverse=True) # type: ignore[attr-defined]
lhs = expr.lhs
rhs = expr.rhs
if not expr.has(sympy.Mod):
try:
solutions = sympy.solve(lhs - rhs, free[0], dict=True)
if len(solutions) != 1:
return
solution = solutions[0][free[0]]
if all(t.is_integer for t in sympy.preorder_traversal(solution)):
new_var = self._find(solution)
self.replacements[cast(sympy.Symbol, free[0])] = new_var
except NotImplementedError:
pass
except RecursionError:
log.warning(f"RecursionError in sympy.solve({lhs} - {rhs}, {free[0]})")
if expr.has(sympy.Mod):
mod_expr = tuple(expr.atoms(sympy.Mod))[0]
try:
solutions = sympy.solve(lhs - rhs, mod_expr, dict=True)
if len(solutions) == 1 and solutions[0][mod_expr] == 0:
self.divisible.add(mod_expr)
except NotImplementedError:
pass
return
@lru_cache(256)
def evaluate_expr(self, expr: "sympy.Expr"):
"""
Given an expression, evaluates it, adding guards if necessary
"""
if len(expr.free_symbols) == 0:
return expr
expr = self.simplify(expr)
static_expr = self._maybe_evaluate_static(expr)
if static_expr is not None:
return static_expr
if isinstance(expr, (sympy.Eq, sympy.Ne)):
self._maybe_guard_eq(expr)
# TODO: If we successfully eliminate a symbol via equality, it
# is not actually necessary to save a guard for the equality,
# as we will implicitly generate a guard when we match that
# input against the symbol
concrete_val = self.size_hint(expr)
# TODO: optimize this; avoid formatting traces until we need them
# NB: drop two frames; evaluate_expr and the Sym* function that
# actually called us
if not self._suppress_guards_tls():
stack = ''.join(traceback.format_list(traceback.extract_stack()[:-2]))
if concrete_val is sympy.true:
self.guards.append(ShapeGuard(expr, stack))
elif concrete_val is sympy.false:
self.guards.append(ShapeGuard(sympy.Not(expr), stack))
else:
self.guards.append(
ShapeGuard(sympy.Eq(expr, concrete_val), stack)) # type: ignore[arg-type]
return concrete_val
| [
"[email protected]"
] | |
ecbffecc0cc2e41903c8ea05f722a15622f58247 | 16bcda63a6bba61af0c835aabd73b0467696be4a | /itertools.permutations()/new 1.py | fc66ce8640fb164b234f6d40cbf4f58b749a3cac | [] | no_license | ZirvedaAytimur/HackerRank_Python | 8ea7d629e75b4db31e04c64d102bf7a0a0b5632b | 175017ed3d8ff11385e12e926b94da97797fc094 | refs/heads/master | 2023-03-13T13:00:42.225364 | 2021-03-10T20:36:18 | 2021-03-10T20:36:18 | 288,210,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from itertools import permutations
S, N = input().split()
print(*[''.join(i) for i in permutations(sorted(S), int(N))], sep="\n") | [
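# Example run (illustrative): for input "HACK 2" this prints the sorted
# two-letter permutations, one per line:
# AC, AH, AK, CA, CH, CK, HA, HC, HK, KA, KC, KH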
"[email protected]"
] | |
fe770d37c3f012383470ba567f00d7c563d70240 | c37e63a902106bbde63fb5517f420e2c043c7f93 | /6.第六類/PYD608.py | f2831263799476d02d6107d8dda3737c6e08f4e2 | [] | no_license | junyi1997/TQC_Python | 35b1320ca7a1b2f8eee4b9e8d4f1b9d7f4a5c02f | befe177880a7034f37848ff404bb7d33f9a07ff9 | refs/heads/master | 2020-04-25T04:05:57.508858 | 2019-03-24T17:03:07 | 2019-03-24T17:03:07 | 172,499,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:16:26 2018
@author: user
Indices of the largest and smallest values in a 3x3 matrix
"""
data=[]
d_max = float('-inf')
d_min = float('inf')
d_ind = (0, 0)   # index of the current maximum
da_ind = (0, 0)  # index of the current minimum
for i in range(3):
data.append([])
for j in range(3):
num=eval(input())
data[i].append(num)
if num > d_max:
d_max=num
d_ind=(i,j)
if num < d_min:
d_min=num
da_ind=(i,j)
print("Index of the largest number {:} is: ({:}, {:})".format(d_max,d_ind[0],d_ind[1]))
print("Index of the smallest number {:} is: ({:}, {:})".format(d_min,da_ind[0],da_ind[1])) | [
"[email protected]"
] | |
7762a95c3c88f5454866b49404c19391dca3536e | 54df8336b50e8f2d7dbe353f0bc51a2b3489095f | /Django/Django_Old/disa-py/member/views.py | 30f6e4591e70ac1b02496798d564776ff26613a4 | [] | no_license | SurendraKumarAratikatla/MyLenovolapCodes1 | 42d5bb7a14bfdf8d773ee60719380ee28ff4947a | 12c56200fcfd3e5229bfeec209fd03b5fc35b823 | refs/heads/master | 2023-06-17T15:44:18.312398 | 2021-07-19T10:28:11 | 2021-07-19T10:28:11 | 387,358,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,082 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
reload(sys)
sys.setdefaultencoding("utf8")
from django.views.generic import TemplateView
from rest_framework import viewsets
from django.conf import settings
from django.core.mail import EmailMessage
from django.db import connection
from django.template.engine import _context_instance_undefined
from member.models import Member, Seva, Address, SevaCategory, NakshatramType, Organisation
from member.serializers import MemberSerializer, SevaCategorySerializer, NakshatramSerializer, \
OrganisationSerilalizer, MemberSevaSerializer
# Create your views here.
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from rest_framework.renderers import JSONRenderer
from django.template.loader import render_to_string
from rolepermissions.decorators import has_role_decorator, has_permission_decorator
from django.contrib.auth import get_user_model
User = get_user_model()
@method_decorator(login_required, name='dispatch')
class MemberHomeView(TemplateView):
template_name = "member/member.html"
http_method_names = ['get']
def get_context_data(self, **kwargs):
context = super(MemberHomeView, self).get_context_data(**kwargs)
seva_category_serializer = SevaCategorySerializer(SevaCategory.objects.all(), many=True)
nakshatram_serializer = NakshatramSerializer(NakshatramType.objects.all(), many=True)
organisation_serializer = OrganisationSerilalizer(Organisation.objects.all(), many=True)
context['seva_category_collection'] = JSONRenderer().render(seva_category_serializer.data)
context['nakshatram_collection'] = JSONRenderer().render(nakshatram_serializer.data)
context['organisation_collection'] = JSONRenderer().render(organisation_serializer.data)
return context
@method_decorator(login_required, name='dispatch')
class MemberViewSet(viewsets.ModelViewSet):
empty_queryset = Member.objects.none()
queryset = Member.objects.all()
serializer_class = MemberSerializer
def list(self, request, *args, **kwargs):
queryset = Member.objects
query_params = request.query_params
# Flag to check if query was filtered at all.
filtered = None
q = query_params.get('q', None)
if q:
filtered = True
queryset = queryset.filter(search__icontains=q)
name = query_params.get('name', None)
if name:
filtered = True
queryset = queryset.filter(name__icontains=name)
surname = query_params.get('surname', None)
if surname:
filtered = True
queryset = queryset.filter(surname__icontains=surname)
mid = query_params.get('mid', None)
if mid:
filtered = True
queryset = queryset.filter(mid=mid)
place = query_params.get('place', None)
if place:
filtered = True
queryset = queryset.filter(place__icontains=place)
email = query_params.get('email', None)
if email:
filtered = True
queryset = queryset.filter(email__icontains=email)
mobile = query_params.get('mobile', None)
if mobile:
filtered = True
queryset = queryset.filter(mobile__icontains=mobile)
phone = query_params.get('phone', None)
if phone:
filtered = True
queryset = queryset.filter(phone__icontains=phone)
self.queryset = queryset
if not filtered:
self.queryset = self.empty_queryset
return super(MemberViewSet, self).list(self, request, args, kwargs)
def retrieve(self, request, *args, **kwargs):
query_params = request.query_params
subscriptions = query_params.get('subscriptions', None)
if subscriptions:
# Subscriptions are also asked. Hence use Member seva serialiser
# which also returns sevas
self.serializer_class = MemberSevaSerializer
return super(MemberViewSet, self).retrieve(self, request, args, kwargs)
from django.core.mail import send_mail, BadHeaderError
from .forms import EmailForm, SmsForm, ReportsForm
from member.sms import Broadcast_SMS
from django.views.generic import TemplateView
from django.shortcuts import redirect, render, render_to_response
from django.template import RequestContext
from disa.middleware.current_user import get_current_user
@method_decorator(login_required, name="get")
class CustomEmail(TemplateView):
template_name = "email_form.html"
@method_decorator(has_permission_decorator("SEND_CUSTOM_MESSAGE"))
def get(self, request, *args, **kwargs):
form = EmailForm()
return render(request, self.template_name, {'form': form})
@method_decorator(has_permission_decorator("SEND_CUSTOM_MESSAGE"))
def post(self, request, *args, **kwargs):
form = EmailForm(request.POST, request.FILES)
# Read the content from the form if the form is valid
if form.is_valid():
recipient_list = []
bcc_list = []
cc_list = []
# Read the required fields from the form
group = form.cleaned_data['groups']
mode = form.cleaned_data['mode']
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
attachment = request.FILES.getlist('attachment')
cc_list = request.POST.getlist('cc_list')
cc_list = cc_list[0].split(",")
# If mode is other than others get the recipient_list from database
if mode == 'others':
recipient_list = request.POST.getlist('recipient_list')
recipient_list = recipient_list[0].split(",")
            # When sending mail to groups fetched from the database, put the
            # recipient list in bcc to avoid sharing email ids between members
else:
bcc_list = self.GetRecipientList(mode, group)
bcc_list = bcc_list[0].split(",")
            # In the 'others' case bcc_list is empty; max(len(bcc_list), 1)
            # keeps the loop running once so recipient_list is still mailed
for count in xrange(0, max(len(bcc_list),1), settings.MAX_RECIPIENTS):
email = EmailMessage(to = recipient_list, from_email =
settings.DEFAULT_FROM_EMAIL, bcc = bcc_list[count:count+settings.MAX_RECIPIENTS], cc = cc_list, body = message, subject = subject)
if attachment:
for eachAttachment in attachment:
file = eachAttachment
if hasattr(file, 'path'):
email.attach_file(file.path)
else:
email.attach(file.name, file.read())
try:
email.send(fail_silently=False)
except BadHeaderError:
return HttpResponse('Invalid header found.')
return render_to_response('email_response.html',context_instance=RequestContext(request))
return redirect('email')
''' Gets recipientList from database based on the mode and group '''
def GetRecipientList(self, mode, group):
cur = connection.cursor()
query = "Select name from sevaCategories;"
if mode == 'all':
query = 'Select email from members'
elif mode == 'group':
try:
query = 'select id from sevaCategories where name='+" '"+group+" '"
cur.execute(query)
sid = cur.fetchone()[0]
query = "select email from members, sevas where sevas.sid="+ " '"+sid+ " '"+ "AND sevas.mid=members.id";
except:
query = 'select id from groupNames where name='+" '"+group+" '"
cur.execute(query)
gid = cur.fetchone()[0]
query = "select email from groupData where gid="+ " '"+gid+ " '"
cur.execute(query)
recipient_list = cur.fetchall()
cur.close()
return [','.join([each[0] for each in recipient_list])]
@method_decorator(login_required, name="get")
class CustomSms(TemplateView):
template_name = "sms_form.html"
@method_decorator(has_permission_decorator("SEND_CUSTOM_MESSAGE"))
def get(self, request, *args, **kwargs):
form = SmsForm()
return render(request,self.template_name, {'form': form})
@method_decorator(has_permission_decorator("SEND_CUSTOM_MESSAGE"))
def post(self, request, *args, **kwargs):
form = SmsForm(request.POST)
if form.is_valid():
group = form.cleaned_data['groups']
mode = form.cleaned_data['mode']
message_sms = form.cleaned_data['message']
number_list = []
message = str(render_to_string('customsms.txt')).format(message=message_sms)
# If mode is other than others get the recipient_list from database
if mode == 'others':
number_list = request.POST.getlist('number_list')[0].split(",")
try:
sms = Broadcast_SMS(number_list, message)
except BadHeaderError:
return HttpResponse('Invalid header found.')
else:
groupAll_list = self.GetNumberList(mode, group)
groupAll_list = groupAll_list[0].split(",")
for count in xrange(0, len(groupAll_list), 20):
try:
sms = Broadcast_SMS(groupAll_list[count:count+20], message)
except BadHeaderError:
return HttpResponse('Invalid header found.')
return render_to_response('sms_response.html',context_instance=RequestContext(request))
return redirect('sms')
''' Gets numberList from database based on the mode and group '''
def GetNumberList(self, mode, group):
cur = connection.cursor()
query = "Select name from sevaCategories;"
if mode == 'all':
query = 'Select mobile from members'
elif mode == 'group':
try:
query = 'select id from sevaCategories where name='+" '"+group+" '"
cur.execute(query)
sid = cur.fetchone()[0]
query = "select mobile from members, sevas where sevas.sid="+ " '"+sid+ " '"+ "AND sevas.mid=members.id";
except:
query = 'select id from groupNames where name='+" '"+group+" '"
cur.execute(query)
gid = cur.fetchone()[0]
query = "select mobile from groupData where gid="+ " '"+gid+ " '"
cur.execute(query)
number_list = cur.fetchall()
cur.close()
return [','.join([each[0] for each in number_list])]
@method_decorator(login_required, name="get")
class Reports(TemplateView):
template_name = "sevaReports.html"
| [
"[email protected]"
] | |
b46c0d94a3c555f1f2593bb99544a3614bf282bd | 4bf3aaf77c309a489100b98a8c03532632df152c | /Python/동빈북/그리디/무지의먹방라이브.py | c99f1764e90f9f5916ae8413225428dd15fcbe6a | [] | no_license | murane/PS | 7fbfc54d962231949efc67f1a35c4b0119de0780 | e938c6c503aeac08bf65e1e66709172b0e5da6ef | refs/heads/master | 2023-05-06T22:51:54.105811 | 2021-05-30T03:34:53 | 2021-05-30T03:34:53 | 293,699,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | import heapq
def solution(food_times, k):
heap=[]
if sum(food_times)<=k:
return -1
for idx,time in enumerate(food_times):
heapq.heappush(heap,(time,idx+1))
    tot_cnt = 0  # total seconds of eating accounted for so far
    cur_cnt = 0  # seconds already spent on every food still in rotation
    while heap:
        tmp_time, tmp_idx = heapq.heappop(heap)
        # finishing the smallest remaining food costs (tmp_time - cur_cnt)
        # seconds for each of the foods still in rotation
        tot_cnt += (tmp_time - cur_cnt) * (len(heap) + 1)
        cur_cnt = tmp_time
        if tot_cnt >= k:
            # this round overshoots second k: put the food back and stop
            heapq.heappush(heap, (tmp_time, tmp_idx))
            break
    # order the survivors by index; (k - tot_cnt) % len(heap) is correct even
    # though tot_cnt overshot k, because the overshoot is a multiple of len(heap)
    heap.sort(key=lambda x: x[1])
    return heap[(k - tot_cnt) % len(heap)][1]
if __name__ == '__main__':
food_times=[4,2,3,6,7,1,5,8]
k=27
#food_times=[3,1,2]
#k=1
answer = 5
print(solution(food_times, k))
| [
"[email protected]"
] | |
2edf5053669ffa0143f87c4e40cfcf93088f2495 | c753216f44c4c5f34d50763a02d720e064ed5d13 | /OPSI/web2/test/test_log.py | 5909fce6269025ae6359143e64908bd32be8d23e | [] | no_license | mpice-mn/python-opsi | 7fefcd590213a5b698022323b166710e8cbf5641 | 76dcd4e38100e019f64731539b31be6e8af60af7 | refs/heads/stable | 2023-05-02T05:25:31.478822 | 2020-02-05T21:16:50 | 2020-02-05T21:16:50 | 104,738,074 | 0 | 0 | null | 2017-09-25T10:49:13 | 2017-09-25T10:49:13 | null | UTF-8 | Python | false | false | 4,489 | py | # Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
from OPSI.web2 import log, resource, http
from OPSI.web2.test.test_server import BaseCase, BaseTestResource
from twisted.python import log as tlog
class BufferingLogObserver(log.BaseCommonAccessLoggingObserver):
"""
    A web2 log observer that buffers messages.
"""
messages = []
def logMessage(self, message):
self.messages.append(message)
class SetDateWrapperResource(resource.WrapperResource):
"""
A resource wrapper which sets the date header.
"""
def hook(self, req):
def _filter(req, resp):
resp.headers.setHeader('date', 0.0)
return resp
_filter.handleErrors = True
req.addResponseFilter(_filter, atEnd=True)
class NoneStreamResource(resource.Resource):
"""
A basic empty resource.
"""
def render(self, req):
return http.Response(200)
class TestLogging(BaseCase):
def setUp(self):
self.blo = BufferingLogObserver()
tlog.addObserver(self.blo.emit)
# some default resource setup
self.resrc = BaseTestResource()
self.resrc.child_emptystream = NoneStreamResource()
self.root = SetDateWrapperResource(log.LogWrapperResource(self.resrc))
def tearDown(self):
tlog.removeObserver(self.blo.emit)
def assertLogged(self, **expected):
"""
Check that logged messages matches expected format.
"""
if 'date' not in expected:
epoch = log.BaseCommonAccessLoggingObserver().logDateString(0)
expected['date'] = epoch
if 'user' not in expected:
expected['user'] = '-'
if 'referer' not in expected:
expected['referer'] = '-'
if 'user-agent' not in expected:
expected['user-agent'] = '-'
if 'version' not in expected:
expected['version'] = '1.1'
if 'remotehost' not in expected:
expected['remotehost'] = 'remotehost'
messages = self.blo.messages[:]
del self.blo.messages[:]
expectedLog = ('%(remotehost)s - %(user)s [%(date)s] "%(method)s '
'%(uri)s HTTP/%(version)s" %(status)d %(length)d '
'"%(referer)s" "%(user-agent)s"')
if expected.get('logged', True):
# Ensure there weren't other messages hanging out
self.assertEquals(len(messages), 1, "len(%r) != 1" % (messages, ))
self.assertEquals(messages[0], expectedLog % expected)
else:
self.assertEquals(len(messages), 0, "len(%r) != 0" % (messages, ))
def test_logSimpleRequest(self):
"""
Check the log for a simple request.
"""
uri = 'http://localhost/'
method = 'GET'
def _cbCheckLog(response):
self.assertLogged(method=method, uri=uri, status=response[0],
length=response[1].getHeader('content-length'))
d = self.getResponseFor(self.root, uri, method=method)
d.addCallback(_cbCheckLog)
return d
def test_logErrors(self):
"""
Test the error log.
"""
def test(_, uri, method, **expected):
expected['uri'] = uri
expected['method'] = method
def _cbCheckLog(response):
self.assertEquals(response[0], expected['status'])
self.assertLogged(
length=response[1].getHeader('content-length'), **expected)
return self.getResponseFor(self.root,
uri,
method=method).addCallback(_cbCheckLog)
uri = 'http://localhost/foo' # doesn't exist
method = 'GET'
d = test(None, uri, method, status=404, logged=True)
# no host. this should result in a 400 which doesn't get logged
uri = 'http:///'
d.addCallback(test, uri, method, status=400, logged=False)
return d
def test_logNoneResponseStream(self):
"""
Test the log of an empty resource.
"""
uri = 'http://localhost/emptystream'
method = 'GET'
def _cbCheckLog(response):
self.assertLogged(method=method, uri=uri, status=200,
length=0)
d = self.getResponseFor(self.root, uri, method=method)
d.addCallback(_cbCheckLog)
return d
| [
"[email protected]"
] | |
283d5c1bd4575915f70b11a4be5c0d487bfa15f2 | 99ed889f5d679f0712a9578435819ff9fe1038e9 | /run_scripts/functional/generate_dataset.py | 972ff5ae78479770f3dfaec2da57e2a2f0771851 | [] | no_license | WMViolet/baselines_tactile | 7e7800c44167d6e29f4f4a187e49d92462f49100 | 761193122ff8c914d8b983d93620a7ffc63ea917 | refs/heads/main | 2023-02-24T00:30:04.616016 | 2021-02-01T23:45:53 | 2021-02-01T23:45:53 | 322,393,115 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,633 | py | import sys
import re
import multiprocessing
import os.path as osp
from envs import gym
from collections import defaultdict
import os
import json
import tensorflow as tf
import numpy as np
from experiment_utils.run_sweep import run_sweep
import tactile_baselines.her.experiment.config as configure
from tactile_baselines.her.experiment.config import configure_her
from tactile_baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv
from tactile_baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
from tactile_baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env
from tactile_baselines.common.tf_util import get_session
from tactile_baselines.common.replay_buffers import SequenceReplayBuffer
from run_scripts.functional.rollout import RolloutWorker
from tactile_baselines import logger
from importlib import import_module
from pdb import set_trace as st
import dill as pickle
INSTANCE_TYPE = 'c4.8xlarge'
EXP_NAME = 'supervised'
class ClassEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, type):
return {'$class': o.__module__ + "." + o.__name__}
if callable(o):
return {'function': o.__name__}
return json.JSONEncoder.default(self, o)
try:
from mpi4py import MPI
except ImportError:
MPI = None
try:
import pybullet_envs
except ImportError:
pybullet_envs = None
try:
import roboschool
except ImportError:
roboschool = None
_game_envs = defaultdict(set)
for env in gym.envs.registry.all():
# TODO: solve this with regexes
env_type = env.entry_point.split(':')[0].split('.')[-1]
_game_envs[env_type].add(env.id)
# reading benchmark names directly from retro requires
# importing retro here, and for some reason that crashes tensorflow
# in ubuntu
_game_envs['retro'] = {
'BubbleBobble-Nes',
'SuperMarioBros-Nes',
'TwinBee3PokoPokoDaimaou-Nes',
'SpaceHarrier-Nes',
'SonicTheHedgehog-Genesis',
'Vectorman-Genesis',
'FinalFight-Snes',
'SpaceInvaders-Snes',
}
def train(args, extra_args):
env_type, env_id = get_env_type(args)
print('env_type: {}'.format(env_type))
total_timesteps = int(args.num_timesteps)
seed = args.seed
num_cpu = args.num_cpu
learn = get_learn_function(args.alg)
alg_kwargs = get_learn_function_defaults(args.alg, env_type)
alg_kwargs.update(extra_args)
env = build_env(args)
if args.network:
alg_kwargs['network'] = args.network
else:
if alg_kwargs.get('network') is None:
alg_kwargs['network'] = get_default_network(env_type)
print('Training {} on {}:{} with arguments \n{}'.format(args.alg, env_type, env_id, alg_kwargs))
obs_type = args.obs_type
process_type = args.process_type
feature_dim = args.feature_dim
fixed_num_of_contact = args.fixed_num_of_contact
model = learn(
env=env,
seed=seed,
total_timesteps=total_timesteps,
num_cpu = num_cpu,
obs_type = obs_type,
process_type = process_type,
feature_dim = feature_dim,
fixed_num_of_contact = [fixed_num_of_contact, False],
**alg_kwargs
)
return model, env
def build_env(args):
ncpu = multiprocessing.cpu_count()
if sys.platform == 'darwin': ncpu //= 2
nenv = args.num_env or ncpu
alg = args.alg
seed = args.seed
obs_type = args.obs_type
fixed_num_of_contact = args.fixed_num_of_contact
env_type, env_id = get_env_type(args)
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
config.gpu_options.allow_growth = True
get_session(config=config)
flatten_dict_observations = alg not in {'her'}
env = make_vec_env(env_id,
env_type,
args.num_env or 1,
seed,
reward_scale=args.reward_scale,
flatten_dict_observations=flatten_dict_observations,
obs_type = obs_type,
fixed_num_of_contact = [fixed_num_of_contact, False])
if env_type == 'mujoco':
env = VecNormalize(env, use_tf=True)
return env
def get_env_type(args):
env_id = args.env
if args.env_type is not None:
return args.env_type, env_id
# Re-parse the gym registry, since we could have new envs since last time.
for env in gym.envs.registry.all():
env_type = env.entry_point.split(':')[0].split('.')[-1]
_game_envs[env_type].add(env.id) # This is a set so add is idempotent
if env_id in _game_envs.keys():
env_type = env_id
env_id = [g for g in _game_envs[env_type]][0]
else:
env_type = None
for g, e in _game_envs.items():
if env_id in e:
env_type = g
break
if ':' in env_id:
env_type = re.sub(r':.*', '', env_id)
assert env_type is not None, 'env_id {} is not recognized in env types'.format(env_id, _game_envs.keys())
return env_type, env_id
def get_default_network(env_type):
if env_type in {'atari', 'retro'}:
return 'cnn'
else:
return 'mlp'
def get_alg_module(alg, submodule=None):
submodule = submodule or alg
try:
# first try to import the alg module from baselines
alg_module = import_module('.'.join(['tactile_baselines', alg, submodule]))
except ImportError:
# then from rl_algs
alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))
return alg_module
def get_learn_function(alg):
return get_alg_module(alg).learn
def get_learn_function_defaults(alg, env_type):
try:
alg_defaults = get_alg_module(alg, 'defaults')
kwargs = getattr(alg_defaults, env_type)()
except (ImportError, AttributeError):
kwargs = {}
return kwargs
def parse_cmdline_kwargs(args):
'''
convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible
'''
def parse(v):
assert isinstance(v, str)
try:
return eval(v)
except (NameError, SyntaxError):
return v
return {k: parse(v) for k,v in parse_unknown_args(args).items()}
def main(**kwargs):
# configure logger, disable logging in child MPI processes (with rank > 0)
arg_list = []
for key in kwargs.keys():
arg_list.append('--' + key)
arg_list.append(str(kwargs[key]))
arg_parser = common_arg_parser()
buffer_size = int(kwargs['buffer_size'])
args, unknown_args = arg_parser.parse_known_args(arg_list)
extra_args = parse_cmdline_kwargs(unknown_args)
params = args.__dict__
import copy
params = copy.deepcopy(params)
if args.obs_type == 'object':
params['label'] = args.obs_type
elif args.obs_type == 'original':
params['label'] = 'object+joint'
elif args.obs_type == 'contact':
params['label'] = 'object+contact(' + args.process_type + ')'
elif args.obs_type == 'full_contact':
params['label'] = 'object+joint+contact(' + args.process_type + ')'
exp_dir = os.getcwd() + '/data/' + EXP_NAME
logger.configure(dir=exp_dir, format_strs=['stdout', 'log', 'csv'], snapshot_mode='last')
json.dump(params, open(exp_dir + '/params.json', 'w'), indent=2, sort_keys=True, cls=ClassEncoder)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = kwargs.get('gpu_frac', 0.95)
sess = tf.Session(config=config)
obs_type = params['obs_type']
fixed_num_of_contact = params['fixed_num_of_contact']
env = gym.make(params['env'], obs_type = obs_type, fixed_num_of_contact = [fixed_num_of_contact, False])
policy_type = kwargs['policy']
policy = pickle.load(open('../policy/' + str(args.env) + '/policy.pickle', 'rb'))
T = env._max_episode_steps
paths = generate_paths(policy,
T,
obs_type,
params['env'],
fixed_num_of_contact,
build_env(args),
contact_dim = env.contact_dim,
buffer_size = buffer_size,
policy_type = policy_type)
obs, act = paths.all_samples()
obs = process_episode(obs, env.contact_dim, fixed_num_of_contact)
folder = '../dataset/sequence/' + str(args.env) + '/seed' + str(kwargs['seed'])
with open(folder + '-dict.pickle', 'wb') as pickle_file:
print(folder)
pickle.dump([obs, act, fixed_num_of_contact], pickle_file)
def process_episode(observations, contact_dim, fixed_num_of_contact):
n, epi_length, obs_dim = observations.shape
observations = observations.reshape((-1, obs_dim))
contacts = observations[:, :contact_dim]
single_dim = contact_dim // fixed_num_of_contact
contacts = contacts.reshape((-1, fixed_num_of_contact, single_dim))
empty = -np.ones((contacts.shape[0], contacts.shape[1], 2))
indices = np.transpose((contacts[:, :, :-9]==1.0).nonzero())
rows, cols, vals = tuple(indices[:, 0][::2]), tuple(indices[:, 1][::2]), indices[:, 2]
empty[rows, cols] = vals.reshape((-1,2))
other_information = observations[:, contact_dim:].reshape((contacts.shape[0], -1))
transformed_dict = {}
transformed_contacts = np.concatenate((empty, contacts[:, :, -9:]), axis = -1)
transformed_dict['geom1s'] = np.expand_dims(empty[:, :, 0], axis = -1) #N*num_contact*1
transformed_dict['geom2s'] = np.expand_dims(empty[:, :, 1], axis = -1) #N*num_contact*1
transformed_dict['positions'] = contacts[:, :, -9:-6] #N*num_contact*3
transformed_dict['force'] = contacts[:, :, -6:] #N*num_contact*6
# dimension: 24:24:6:7
transformed_dict['object_position'] = other_information[:, 48+6:] #N*7
transformed_dict['object_vel'] = other_information[:, 48:48+6] #N*6
transformed_dict['joint_position'] = other_information[:, :24] #N*24
transformed_dict['joint_vel'] = other_information[:, 24:48] #N*24
for key in transformed_dict:
if len(transformed_dict[key].shape) == 2:
_, dim = transformed_dict[key].shape
transformed_dict[key] = transformed_dict[key].reshape((n, epi_length, dim))
elif len(transformed_dict[key].shape) == 3:
_, num, dim = transformed_dict[key].shape
transformed_dict[key] = transformed_dict[key].reshape((n, epi_length, num, dim))
return transformed_dict
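# Rough shape sketch for the function above (illustrative): with
# fixed_num_of_contact = C, an (n, epi_length, obs_dim) observation array
# becomes a dict with per-contact arrays such as
# positions -> (n, epi_length, C, 3) and force -> (n, epi_length, C, 6),
# plus flat arrays such as object_position -> (n, epi_length, 7) and
# joint_position -> (n, epi_length, 24).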
def generate_paths(policy,
T,
obs_type,
env_name,
fixed_num_of_contact,
env,
contact_dim,
buffer_size,
policy_type):
rollout_params = {
'exploit': True,
'use_target_net': False,
'use_demo_states': True,
'compute_Q': False,
'T': T,
'contact_dim': contact_dim,
}
params = configure.DEFAULT_PARAMS
env_name = env.spec.id
params['env_name'] = env_name
replay_strategy = 'future'
params['replay_strategy'] = replay_strategy
params['obs_type'] = obs_type
params['fixed_num_of_contact'] = fixed_num_of_contact
if env_name in configure.DEFAULT_ENV_PARAMS:
params.update(configure.DEFAULT_ENV_PARAMS[env_name]) # merge env-specific parameters in
params = configure.prepare_params(params)
params['rollout_batch_size'] = env.num_envs
dims = configure.configure_dims(params)
# dims['o'] = policy.dimo
rollout_worker = RolloutWorker(env,
policy,
dims,
logger,
monitor=True,
noise_eps = 0.1,
random_eps = 0.1,
use_random_policy = (policy_type == 'random'),
**rollout_params)
rollout_worker.clear_history()
obs_dim = int(np.prod(env.observation_space['observation'].shape))
act_dim = int(np.prod(env.action_space.shape))
buffer = SequenceReplayBuffer(obs_dim, act_dim, buffer_size, episode_size = 100)
while buffer._size < buffer_size:
print(buffer._size, buffer_size)
episode = rollout_worker.generate_rollouts()
# (8.837223, -10.698544, 0.029151862) (0.9997292357316148, -0.9992342112650581, 0.014519963432654456)
# (11.591648, -14.190264, 0.03476813) (0.9978768825193871, -0.9985089559162041, 0.006812978986824379)
full_obs = episode['o'].reshape((-1, obs_dim)) #100, 583
acts = episode['u'].reshape((-1, act_dim))
buffer.add_samples(full_obs, acts)
return buffer
if __name__ == '__main__':
sweep_params = {
'alg': ['her'],
'seed': [2],
'env': ['HandManipulateEgg-v0'],
# Env Sampling
'num_timesteps': [1e6],
'fixed_num_of_contact': [9],
'buffer_size': [1e5//2],
# Problem Conf
'num_cpu': [1],
'obs_type': ['object_loc+rot+geom+contact_loc+force+other'],
'process_type': ['none'],
'feature_dim': [32],
'policy': ['trained'], #'random', 'trained'
}
run_sweep(main, sweep_params, EXP_NAME, INSTANCE_TYPE)
# python run_scripts/functional/generate_dataset.py
| [
"[email protected]"
] | |
f722f47de9df0ad22072198f7ee90be74d106206 | 1dd0e0cbe481e965b1c9886a3d44ca3a122ae2af | /medikit/pipeline.py | 59c97241d4e208de7b4bbea3d0301dc62eb5f615 | [] | no_license | sei-nicolas/medikit | 7dffc0a2866cba54023e19f5ebcd7c70b57359e8 | e06fe8bc80cfa9af6fdeedf1f5fc2ef2f662dd9b | refs/heads/master | 2020-07-07T00:02:18.040553 | 2019-06-30T08:33:42 | 2019-06-30T08:33:42 | 203,179,919 | 0 | 0 | null | 2019-08-19T13:36:52 | 2019-08-19T13:36:52 | null | UTF-8 | Python | false | false | 2,278 | py | """
Pipelines are a way to describe a simple step-by-step process, for example the release process.
"""
import datetime
import json
import logging
logger = logging.getLogger(__name__)
class Pipeline:
"""
Class to configure a pipeline.
"""
def __init__(self):
self.steps = []
def add(self, step):
self.steps.append(step)
return self
def remove(self, identity):
for i in range(len(self.steps)):
if identity == get_identity(self.steps[i]):
del self.steps[i]
break
def get_identity(step):
return str(step)
class ConfiguredPipeline:
"""
Used to actually load run and persist a configured pipeline.
"""
def __init__(self, name, pipeline, config=None):
self.name = name
self.steps = pipeline.steps
self.meta = {"created": str(datetime.datetime.now())}
self.config = config
def init(self):
for step in self.steps:
step.init()
def next(self):
for step in self.steps:
if not step.complete:
return step
raise StopIteration("No step left.")
@property
def current(self):
for i, step in enumerate(self.steps):
if not step.complete:
return i + 1
return len(self)
def __len__(self):
return len(self.steps)
def abort(self):
for step in self.steps:
step.abort()
def serialize(self):
return json.dumps(
{
"meta": {**self.meta, "updated": str(datetime.datetime.now())},
"steps": [[get_identity(step), step.get_state()] for step in self.steps],
},
indent=4,
)
def unserialize(self, serialized):
serialized = json.loads(serialized)
self.meta = serialized.get("meta", {})
steps = serialized.get("steps", [])
if len(steps) != len(self.steps):
raise IOError("Invalid pipeline state storage.")
for (identity, state), step in zip(steps, self.steps):
if get_identity(step) != identity:
raise IOError("Mismatch on step identity.")
step.set_state(state)
step.config = self.config
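# A minimal usage sketch (illustrative; `my_step` stands in for a real step
# object exposing init/abort/complete/get_state/set_state):
#
#   pipeline = Pipeline()
#   pipeline.add(my_step)
#   configured = ConfiguredPipeline("release", pipeline, config=None)
#   configured.init()
#   step = configured.next()          # first incomplete step
#   state = configured.serialize()    # JSON snapshot for persistence
#   configured.unserialize(state)     # restore on a later run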
| [
"[email protected]"
] | |
784081e775bbe964c1190f6cd858499aab6d04fd | dc3b3615e0e94e730508229c0fc758fc09390856 | /src_nlp/tensorflow/toward_control/model/discriminator.py | 3b73306d82b4046fa5b0514ff421f7b6998eda2a | [
"MIT"
] | permissive | ashishpatel26/finch | 63993c208e0fc0ed52512420d38d03ce456116c1 | bf2958c0f268575e5d51ad08fbc08b151cbea962 | refs/heads/master | 2020-04-20T21:59:25.292770 | 2018-12-25T01:31:50 | 2018-12-25T01:31:50 | 169,125,127 | 0 | 1 | MIT | 2019-02-04T18:17:06 | 2019-02-04T18:17:06 | null | UTF-8 | Python | false | false | 4,199 | py | from base import BaseModel
from configs import args
import tensorflow as tf
class _Discriminator(BaseModel):
    def __init__(self, dataloader=None, build_graph=True):  # dataloader was an undefined global in the original
super().__init__('Discriminator')
with tf.variable_scope(self._scope):
self.embedding = tf.get_variable('lookup_table', [args.vocab_size, args.embed_dims])
self.dropout_embed = tf.layers.Dropout(args.discriminator_dropout_rate)
self.attn_proj = tf.layers.Dense(1, tf.tanh)
self.output_proj = tf.layers.Dense(args.n_class)
        if build_graph and dataloader is not None:
self.build_train_graph(dataloader)
self.build_predict_graph(dataloader)
def __call__(self, inputs, is_training, soft_inp=False):
with tf.variable_scope(self._scope):
if soft_inp:
_inputs = tf.reshape(inputs, [-1, args.vocab_size])
x = tf.matmul(_inputs, self.embedding)
batch_sz = tf.shape(inputs)[0]
x = tf.reshape(x, [batch_sz, args.max_len, args.embed_dims])
else:
x = tf.nn.embedding_lookup(self.embedding, inputs)
x = self.dropout_embed(x, training=is_training)
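            # additive attention pooling over time: score each position, softmax
            # the scores, then take the attention-weighted sum of the states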
align = tf.squeeze(self.attn_proj(x), -1)
align = tf.expand_dims(tf.nn.softmax(align), -1)
x = tf.squeeze(tf.matmul(x, align, transpose_a=True), -1)
logits = self.output_proj(x)
return logits
class Discriminator(BaseModel):
    def __init__(self, dataloader=None, build_graph=True):  # dataloader was an undefined global in the original
super().__init__('Discriminator')
with tf.variable_scope(self._scope):
self.embedding = tf.get_variable('lookup_table', [args.vocab_size, args.embed_dims])
self.dropout_embed = tf.layers.Dropout(args.discriminator_dropout_rate)
self.conv_k3 = tf.layers.Conv1D(args.n_filters, 3, activation=tf.nn.relu)
self.conv_k4 = tf.layers.Conv1D(args.n_filters, 4, activation=tf.nn.relu)
self.conv_k5 = tf.layers.Conv1D(args.n_filters, 5, activation=tf.nn.relu)
self.dropout_feat = tf.layers.Dropout(args.discriminator_dropout_rate)
self.hidden_proj = tf.layers.Dense(args.n_filters, tf.nn.relu)
self.output_proj = tf.layers.Dense(args.n_class)
        if build_graph and dataloader is not None:
self.build_train_graph(dataloader)
self.build_predict_graph(dataloader)
def __call__(self, inputs, is_training, soft_inp=False):
with tf.variable_scope(self._scope):
if soft_inp:
_inputs = tf.reshape(inputs, [-1, args.vocab_size])
x = tf.matmul(_inputs, self.embedding)
batch_sz = tf.shape(inputs)[0]
x = tf.reshape(x, [batch_sz, args.max_len, args.embed_dims])
else:
x = tf.nn.embedding_lookup(self.embedding, inputs)
x = self.dropout_embed(x, training=is_training)
feat_map = []
for conv in [self.conv_k3, self.conv_k4, self.conv_k5]:
_x = conv(x)
_x = tf.layers.max_pooling1d(_x, _x.get_shape().as_list()[1], 1)
_x = tf.reshape(_x, (tf.shape(x)[0], args.n_filters))
feat_map.append(_x)
x = tf.concat(feat_map, -1)
x = self.dropout_feat(x, training=is_training)
x = self.hidden_proj(x)
logits = self.output_proj(x)
return logits
def build_train_graph(self, dataloader):
X_batch, y_batch = dataloader.train_iterator.get_next()
        logits = self(X_batch, is_training=True)  # __call__ implements the forward pass (the original called an undefined self.forward)
self.ops['global_step'] = tf.Variable(0, trainable=False)
self.ops['loss'] = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=y_batch))
self.ops['train'] = tf.train.AdamOptimizer().minimize(
self.ops['loss'], global_step=self.ops['global_step'])
def build_predict_graph(self, dataloader):
        self.ops['pred_logits'] = self(dataloader.predict_iterator.get_next(),
                                       is_training=False)
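# Usage sketch (comments only; assumptions, not from the original source):
#   `args` comes from the project's configs module, and the graphs expect a
#   dataloader exposing train_iterator / predict_iterator (TF1-style sessions):
#     disc = Discriminator(dataloader=dataloader)      # CNN text classifier
#     sess.run([disc.ops['loss'], disc.ops['train']])  # one training step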
| [
"[email protected]"
] | |
c37099ecc1222f0c5828a745aa379d5e3379a1f6 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/cdn/azure-mgmt-cdn/generated_samples/custom_domains_delete.py | a6329d05e663555c4614fd72f5abf4438f6b3885 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,593 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.cdn import CdnManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cdn
# USAGE
python custom_domains_delete.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = CdnManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.custom_domains.begin_delete(
resource_group_name="RG",
profile_name="profile1",
endpoint_name="endpoint1",
custom_domain_name="www-someDomain-net",
).result()
print(response)
# x-ms-original-file: specification/cdn/resource-manager/Microsoft.Cdn/stable/2021-06-01/examples/CustomDomains_Delete.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
e1823faa28ca7cfcca7c723b670ba84b7359aad2 | 7972f2a08d201325f13847086d9c6e3161fa0e95 | /OpenCV.xcodeproj/lib/python3.7/site-packages/numpy/polynomial/tests/test_laguerre.py | bc94afbefd1951fbcc5e416ace0ad745b67e4868 | [] | no_license | valiok98/testing | d430e1a2bfa6c4ec758f6629cb0e11f3d19e1480 | b022b04e92f14d5c7fa69d589bfa8983160890a4 | refs/heads/master | 2022-01-14T22:11:00.365164 | 2019-05-14T11:53:08 | 2019-05-14T11:53:08 | 184,013,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | ../../../../../../Cellar/numpy/1.16.3_1/lib/python3.7/site-packages/numpy/polynomial/tests/test_laguerre.py | [
"[email protected]"
] | |
5149a29ec22475fb3f959d83ecc335c7692de590 | d48a10791e607b1690eea9c6c7191416cd60dc2d | /src/tlog/receiver/parse/syslog.py | 1691afeb50010f7356e35dd4e226981e5d97f225 | [] | no_license | thomaserlang/TLog | d84d1f51d5454598539fa8ab56b26292a13899f9 | 3f331c7169c90d1fac0d1922b011b56eebbd086a | refs/heads/master | 2020-06-02T19:26:41.600474 | 2014-08-23T19:00:34 | 2014-08-23T19:00:34 | 12,624,537 | 2 | 0 | null | 2013-09-13T16:00:48 | 2013-09-05T17:52:29 | JavaScript | UTF-8 | Python | false | false | 7,392 | py | import logging
import re
import dateutil.parser
from tlog.receiver.parse.parsed import Parsed
class Syslog(object):
'''
Class for parsing different syslog standards.
Parse a syslog message:
Syslog.parse('some syslog').
    Returns None if the log format is not supported.
Supported standards:
* RFC 3164
* RFC 5424
'''
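    # Example (illustrative, using the classic RFC 3164 sample message):
    #   parsed = Syslog.parse("<34>Oct 11 22:14:15 mymachine su[123]: 'su root' failed")
    #   parsed.hostname == 'mymachine'; parsed.data['app-name'] == 'su'
    #   parsed.data['severity'] == 2;   parsed.standard == u'Syslog 3164'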
# PRI TIMESTAMP HOST MESSAGE
regex_3164 = re.compile('^<([0-9]+)>([a-z ]+ [0-9]+ [0-9:]+) ([^ ]+) (.*)$', re.IGNORECASE)
# PRI VERSION TIME HOST APP PROCID MSGID STRUC-DATA MESSAGE
regex_5424 = re.compile('^<([0-9]+)>([0-9]+) ([^ ]+) ([^ ]+) ([^- ]+|-) ([^- ]+|-) ([^- ]+|-) (\[.*\]|-) (.*)$', re.IGNORECASE)
@classmethod
def parse_message(cls, message):
'''
Removes BOM from the message.
BOM indicates that the message is encoded with utf-8.
:param message: str
:returns: str
'''
if message[:3] == 'BOM':
if isinstance(message, unicode):
return message[3:]
return message[3:].decode('utf-8')
return message
@classmethod
def parse_process_info_3164(cls, message):
'''
Returns process info from a message and the message,
where the process info has been removed.
Example:
su[123]: 'su root' failed for lonvick on /dev/pts/8
:param message: str
:returns: (str, dict)
str:
'su root' failed for lonvick on /dev/pts/8
dict:
{
'app-name': 'su',
'procid': 123
}
'''
i = 0
value = ''
data = {}
prev_s = ''
if message[:1] == ':':
return (message[1:], data)
for s in message:
i += 1
if s == ' ' and prev_s == ':':
if 'app-name' in data:
return (message[i:], data)
elif s == ' ':
return (message, data)
elif s in ('[', ':'):
if 'app-name' not in data:
data['app-name'] = value
value = ''
prev_s = s
continue
elif s == ']':
data['procid'] = int(value)
continue
value = value + s
prev_s = s
return (message, data)
@classmethod
def parse_structured_data(cls, structured_data):
'''
Parses a structured-data as specified in: http://tools.ietf.org/html/rfc5424#section-6.3
Example:
[exampleSDID@32473 iut="3" eventSource="Application \\"[test\\]\\"" eventID="1011"][examplePriority@32473 class="high"]
:param structured_data: str
http://tools.ietf.org/html/rfc5424#section-6.3
:returns: dict
{
'exampleSDID@32473': {
'iut': '3',
'eventSource': 'Application "[test]"',
'eventID': '1011'
},
'examplePriority@32473': {
'class': 'high'
}
}
'''
def remove_escaped(value):
# http://tools.ietf.org/html/rfc5424#section-6.3.3
value = value.replace(u'\\"', u'"')
value = value.replace(u'\\]', ']')
value = value.replace(u'\\\\', '\\')
return value
if isinstance(structured_data, str):
structured_data = structured_data.decode('utf-8')
parsed = {}
d = parsed
key = u''
find_key = True
find_field = False
value = u''
in_string = False
prev_s = u''
for s in structured_data:
if not in_string:
if s == u'[':
find_key = True
find_field = False
d = parsed
continue
if s in (u' ', u']'): # key found
if not key:
continue
parsed[key] = {}
d = parsed[key]
find_field = True
key = ''
continue
if s == u'=':# key found and value start
find_field = False
in_string = False
continue
                if s == u'"' and prev_s != u'\\':
if not in_string:
in_string = True
continue
# value found
d[key] = remove_escaped(value)
value = ''
key = ''
find_field = True
in_string = False
continue
if not in_string:
key = key + s
else:
value = value + s
prev_s = s
return parsed
@classmethod
def parse_3164(cls, log):
'''
:returns: ``Parsed``
'''
match = cls.regex_3164.match(log)
if match:
pri = int(match.group(1))
severity = pri % 8
message, process_info = cls.parse_process_info_3164(match.group(4))
data = {
'message': cls.parse_message(message),
'priority': pri,
'facility': pri / 8,
'severity': severity,
'timestamp': dateutil.parser.parse(match.group(2)),
}
data.update(process_info)
return Parsed(
hostname=match.group(3),
level=severity,
data=data,
standard=u'Syslog 3164',
)
return None
@classmethod
def parse_5424(cls, log):
'''
:returns: ``Parsed``
'''
match = cls.regex_5424.match(log)
if match:
pri = int(match.group(1))
severity = pri % 8
data = {
'message': cls.parse_message(match.group(9)),
'priority': pri,
'facility': pri / 8,
'severity': severity,
'timestamp': dateutil.parser.parse(match.group(3)),
}
            if match.group(5) != '-':
                data['app-name'] = match.group(5)
            if match.group(6) != '-':
                data['procid'] = match.group(6)
            if match.group(7) != '-':
                data['msgid'] = match.group(7)
            if match.group(8) != '-':
data['structured-data'] = cls.parse_structured_data(match.group(8))
return Parsed(
hostname=match.group(4),
level=severity,
data=data,
standard=u'Syslog 5424',
)
return None
@classmethod
def parse(cls, log):
'''
Tries the different log standards.
Returns none if the log format is not supported.
:returns: Parsed
'''
s_3164 = cls.parse_3164(log)
if s_3164:
return s_3164
s_5424 = cls.parse_5424(log)
if s_5424:
return s_5424
return None | [
"[email protected]"
] | |
6930b3b01ee36c759829bd4f634118dc666a853f | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit2987.py | 472b1cd797ec2131dde4ee210490114eef1d7309 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,147 | py | # qubit number=4
# total number=39
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
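# Note (added for clarity; an observation, not from the original): with the
# a = "111", b = "0" used below, f(rep) = (a . rep) xor b is the parity of
# rep's bits, so this oracle flips the target exactly on odd-weight inputs.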
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=30
prog.cx(input_qubit[0],input_qubit[3]) # number=36
prog.x(input_qubit[3]) # number=37
prog.cx(input_qubit[0],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=13
prog.cz(input_qubit[0],input_qubit[3]) # number=14
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[3],input_qubit[1]) # number=19
prog.z(input_qubit[3]) # number=25
prog.x(input_qubit[3]) # number=35
prog.h(input_qubit[1]) # number=20
prog.rx(-3.141592653589793,input_qubit[3]) # number=26
prog.h(input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[2]) # number=17
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=27
prog.cz(input_qubit[1],input_qubit[0]) # number=28
prog.h(input_qubit[0]) # number=29
prog.cx(input_qubit[1],input_qubit[0]) # number=22
prog.cx(input_qubit[2],input_qubit[1]) # number=34
prog.x(input_qubit[1]) # number=23
prog.x(input_qubit[1]) # number=24
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2987.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
bab103f06638d1b892180096303b9bf0e6475feb | 92a7d8b7cef98a8af9e11c3a3e32537e42a5909f | /temp.py | bb3d3707f91aad9a5bb630f9d7575eccac34a46d | [] | no_license | shanto268/focus-stacking | 3b8f74ec7338f2664ac35f88f32bcdc304c011ff | e3e9dba2adfdb4a85a3a5b4a0c4f9f40e4240a9a | refs/heads/master | 2023-04-11T04:36:16.236913 | 2021-04-22T20:08:43 | 2021-04-22T20:08:43 | 360,668,996 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | def getNeatTomograms(csvFile):
    # imports and helper assumed by this snippet (missing from the original file);
    # placed inside the function because the file's first line is the def itself
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    def mkdir_p(path):
        os.makedirs(path, exist_ok=True)  # assumed helper: `mkdir -p` semantics
    I_matrices_reshaped = np.loadtxt(csvFile)
filters = [
'none', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36',
'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom',
'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos'
]
for j in range(len(filters)):
dir_name = "/Users/sshanto/hep/hep_daq/CAMAC/focus-stacking/images/mystery_same_axis/{}".format(
filters[j])
mkdir_p(dir_name)
k = 0
print("Using {} filter".format(filters[j]))
for i in I_matrices_reshaped:
i = i.reshape(21, 21)
fig = plt.figure(frameon=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
imshowobj = ax.imshow(np.flip(i),
aspect='auto',
                                  interpolation=filters[j])
imshowobj.set_clim(0.9, 1.2)
fname = "{}/img{}.png".format(dir_name, k)
            fig.savefig(fname)
            plt.close(fig)  # free the figure; this loop creates many of them
            k += 1
| [
"[email protected]"
] | |
b1efdec2c8d1b55d5b698c8a63e0cd204b58a672 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /problems/0947.0_Most_Stones_Removed_with_Same_Row_or_Column.py | cd63b6a1fee276fbeb5a6d85dbc93e57564f6f69 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | '''
Runtime: 355 ms, faster than 64.84% of Python3 online submissions for Most Stones Removed with Same Row or Column.
Memory Usage: 14.6 MB, less than 72.25% of Python3 online submissions for Most Stones Removed with Same Row or Column.
'''
# imports that the LeetCode runtime preloads implicitly
from collections import defaultdict
from itertools import chain
from typing import List
class UF:
def __init__(self, n):
self.p = list(range(n))
self.set_count = n
def find(self, x):
if x == self.p[x]:
return x
self.p[x] = self.find(self.p[x])
return self.p[x]
def union(self, x, y):
px, py = self.find(x), self.find(y)
if px != py:
self.p[px] = py
self.set_count -= 1
class Solution:
def removeStones(self, stones: List[List[int]]) -> int:
# same y; x list -> index for stones
x_indice = defaultdict(list)
# same x; y list -> index for stones
y_indice = defaultdict(list)
n = len(stones)
for i, (x, y) in enumerate(stones):
x_indice[y].append(i)
y_indice[x].append(i)
uf = UF(n)
for _, indice in chain(x_indice.items(), y_indice.items()):
i = indice[0]
for j in indice:
uf.union(i, j)
return n - uf.set_count
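# Why this works (explanatory note, not in the original): stones sharing a row
# or column end up in one union-find component, and a component of size s
# permits s - 1 removals, so the answer is n minus the number of components.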
| [
"[email protected]"
] | |
60348d5d0dcc591878cc15c5daf0973aebeb18b0 | b15ccd04d3edfb4d6278a055422610be09c3916c | /4615_재미있는 오셀로 게임/sol3.py | 1c09cf931c77a05419f6e8447e49b0e62a0ad716 | [] | no_license | hksoftcorn/Algorithm | d0f3a1a6009f47e4f391e568b29a3b51d6095d33 | 81b067b8105ba305172dd8271787c19f04d170ba | refs/heads/master | 2023-05-12T21:15:34.668580 | 2021-06-08T07:57:04 | 2021-06-08T07:57:04 | 337,121,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | import sys
sys.stdin = open('sample_input.txt', 'r')
def check(col, row):
    # board bounds test (the original stub took an unused color argument)
    global arr, N
    return 0 <= col < N and 0 <= row < N
def reversi(col, row, color):
    global arr
    # input coordinates are assumed 1-indexed (SWEA input format); convert
    col, row = col - 1, row - 1
    # up/down/left/right and the four diagonals
    dx = [-1, 1, 0, 0, -1, -1, 1, 1]
    dy = [0, 0, -1, 1, -1, 1, -1, 1]
    arr[col][row] = color
    # the walk below completes the unfinished loop of the original: collect
    # opponent stones in each direction and flip them only when the run is
    # closed by one of our own stones (standard Othello flipping rule)
    for i in range(8):
        d_x = dx[i]
        d_y = dy[i]
        change_list = []
        c, r = col + d_x, row + d_y
        while check(c, r) and arr[c][r] == 3 - color:
            change_list.append((c, r))
            c += d_x
            r += d_y
        if check(c, r) and arr[c][r] == color:
            for cc, rr in change_list:
                arr[cc][rr] = color
T = int(input())
for tc in range(1, T+1):
N, M = map(int, input().split())
arr = [[0] * N for _ in range(N)]
    # black: 1, white: 2
arr[N // 2][N // 2 - 1] = 1
arr[N // 2 - 1][N // 2] = 1
arr[N // 2][N // 2] = 2
arr[N // 2 - 1][N // 2 - 1] = 2
for _ in range(M):
col, row, color = map(int, input().split())
reversi(col, row, color)
    # print(arr)  # debug dump; disabled so only the required '#tc black white' line prints
black = 0
white = 0
for i in range(len(arr)):
for j in range(len(arr[0])):
if arr[i][j] == 1:
black += 1
elif arr[i][j] == 2:
white += 1
print('#{} {} {}'.format(tc, black, white)) | [
"[email protected]"
] | |
0c9583a3811439fcc5cda71bf42497a2544f7d7b | 3c27b86f0165ab24e6b04d505e8471e032594f0b | /pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GLES1/IMG/user_clip_plane.py | b8f7827c07884c9fba7dd0ac8e86eab155bfd9a7 | [
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LGPL-2.1-or-later",
"GPL-3.0-only",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"MIT"
] | permissive | alexus37/AugmentedRealityChess | 8b9ccdfffc8aee93a86a44b8ef53c034ec6a10d1 | 7f600ad153270feff12aa7aa86d7ed0a49ebc71c | refs/heads/master | 2020-12-24T13:29:21.967833 | 2020-02-27T09:38:50 | 2020-02-27T09:38:50 | 31,264,034 | 1 | 1 | MIT | 2020-02-27T09:38:52 | 2015-02-24T14:36:34 | Python | UTF-8 | Python | false | false | 938 | py | '''OpenGL extension IMG.user_clip_plane
This module customises the behaviour of the
OpenGL.raw.GLES1.IMG.user_clip_plane to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/IMG/user_clip_plane.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.IMG.user_clip_plane import *
from OpenGL.raw.GLES1.IMG.user_clip_plane import _EXTENSION_NAME
def glInitUserClipPlaneIMG():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glClipPlanefIMG=wrapper.wrapper(glClipPlanefIMG).setInputArraySize(
'eqn', 4
)
glClipPlanexIMG=wrapper.wrapper(glClipPlanexIMG).setInputArraySize(
'eqn', 4
)
### END AUTOGENERATED SECTION | [
"[email protected]"
] | |
1b93ddc9c6c1399eaa6bec5b56290bfafa63f575 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-2/b93d3b23f51ef282c90dcf924475eb1fcaa4151d-<__getitem__>-fix.py | 5adbfa85f57ff1603ef0188a487be24d8bd31338 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py |
def __getitem__(self, key):
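    # maps external indices into the [self.start, self.end) window of self.data,
    # supporting slice, int, ndarray and list keys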
if isinstance(key, slice):
(start, stop) = (key.start, key.stop)
if (start is None):
start = 0
if (stop is None):
stop = self.data.shape[0]
if ((stop + self.start) <= self.end):
idx = slice((start + self.start), (stop + self.start))
else:
raise IndexError
elif isinstance(key, int):
if ((key + self.start) < self.end):
idx = (key + self.start)
else:
raise IndexError
elif isinstance(key, np.ndarray):
if ((np.max(key) + self.start) < self.end):
idx = (self.start + key).tolist()
else:
raise IndexError
elif isinstance(key, list):
if ((max(key) + self.start) < self.end):
idx = [(x + self.start) for x in key]
else:
raise IndexError
else:
raise IndexError
if (self.normalizer is not None):
return self.normalizer(self.data[idx])
else:
return self.data[idx]
| [
"[email protected]"
] | |
62162d4ab0cfb57a6d6fefa1e484bb594dd92685 | ec15546fbcf6a2523f08950f0da01ade4451524f | /example_project/testapp/streams.py | dd63d45ce22d39cd8fdddc679a4b03ef35dece2e | [
"BSD-3-Clause"
] | permissive | seaw688/django-activity-stream | 5869f3716e8d261296d520455f6412cbeca3c2db | b4e470444bcccb666bfb3711c0bced7863936ff9 | refs/heads/master | 2020-12-18T10:57:24.140620 | 2020-01-21T15:39:07 | 2020-01-21T15:39:07 | 235,354,434 | 0 | 0 | BSD-3-Clause | 2020-01-21T13:55:13 | 2020-01-21T13:55:12 | null | UTF-8 | Python | false | false | 378 | py | from datetime import datetime
from actstream.managers import ActionManager, stream
class MyActionManager(ActionManager):
@stream
def testfoo(self, obj, time=None):
if time is None:
time = datetime.now()
        return obj.actor_actions.filter(timestamp__lte=time)
@stream
def testbar(self, verb):
return self.filter(verb=verb)
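# Usage sketch (an assumption, not from the original file): once this manager
# is installed as actstream's manager, the custom streams are reachable from
# the model manager, e.g. Action.objects.testfoo(user) or Action.objects.testbar('liked')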
| [
"[email protected]"
] | |
3e0281fe6bf7c9011f21574c13f9ca744ce5653c | 8efe56ee34c455a6b1336897f6d457acbc9c10f9 | /examples/torch/pearl_half_cheetah_vel.py | 0e468a052eb49096c983e7c4462f42d069776847 | [
"MIT"
] | permissive | neurips2020submission11699/metarl | ab18d11e708bf569d76cb2fab2bcce089badd111 | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | refs/heads/master | 2022-10-15T22:03:09.948673 | 2020-06-11T19:22:55 | 2020-06-11T19:30:58 | 268,410,657 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,501 | py | #!/usr/bin/env python3
"""PEARL HalfCheetahVel example."""
import click
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv, normalize
from metarl.envs.mujoco import HalfCheetahVelEnv
from metarl.experiment import LocalRunner
from metarl.experiment.deterministic import set_seed
from metarl.experiment.task_sampler import SetTaskSampler
from metarl.sampler import LocalSampler
from metarl.torch import set_gpu_mode
from metarl.torch.algos import PEARL
from metarl.torch.algos.pearl import PEARLWorker
from metarl.torch.embeddings import MLPEncoder
from metarl.torch.policies import ContextConditionedPolicy
from metarl.torch.policies import TanhGaussianMLPPolicy
from metarl.torch.q_functions import ContinuousMLPQFunction
@click.command()
@click.option('--num_epochs', default=500)
@click.option('--num_train_tasks', default=100)
@click.option('--num_test_tasks', default=30)
@click.option('--encoder_hidden_size', default=200)
@click.option('--net_size', default=300)
@click.option('--num_steps_per_epoch', default=2000)
@click.option('--num_initial_steps', default=2000)
@click.option('--num_steps_prior', default=400)
@click.option('--num_extra_rl_steps_posterior', default=600)
@click.option('--batch_size', default=256)
@click.option('--embedding_batch_size', default=100)
@click.option('--embedding_mini_batch_size', default=100)
@click.option('--max_path_length', default=200)
@wrap_experiment
def pearl_half_cheetah_vel(ctxt=None,
seed=1,
num_epochs=500,
num_train_tasks=100,
num_test_tasks=30,
latent_size=5,
encoder_hidden_size=200,
net_size=300,
meta_batch_size=16,
num_steps_per_epoch=2000,
num_initial_steps=2000,
num_tasks_sample=5,
num_steps_prior=400,
num_extra_rl_steps_posterior=600,
batch_size=256,
embedding_batch_size=100,
embedding_mini_batch_size=100,
max_path_length=200,
reward_scale=5.,
use_gpu=False):
"""Train PEARL with HalfCheetahVel environment.
Args:
ctxt (metarl.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
num_test_tasks (int): Number of tasks for testing.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
max_path_length (int): Maximum path length.
reward_scale (int): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
"""
set_seed(seed)
encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
encoder_hidden_size)
# create multi-task environment and sample tasks
env_sampler = SetTaskSampler(lambda: MetaRLEnv(
normalize(HalfCheetahVelEnv())))
env = env_sampler.sample(num_train_tasks)
test_env_sampler = SetTaskSampler(lambda: MetaRLEnv(
normalize(HalfCheetahVelEnv())))
runner = LocalRunner(ctxt)
# instantiate networks
augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
qf = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
num_train_tasks=num_train_tasks,
num_test_tasks=num_test_tasks,
latent_dim=latent_size,
encoder_hidden_sizes=encoder_hidden_sizes,
test_env_sampler=test_env_sampler,
meta_batch_size=meta_batch_size,
num_steps_per_epoch=num_steps_per_epoch,
num_initial_steps=num_initial_steps,
num_tasks_sample=num_tasks_sample,
num_steps_prior=num_steps_prior,
num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
batch_size=batch_size,
embedding_batch_size=embedding_batch_size,
embedding_mini_batch_size=embedding_mini_batch_size,
max_path_length=max_path_length,
reward_scale=reward_scale,
)
set_gpu_mode(use_gpu, gpu_id=0)
if use_gpu:
pearl.to()
runner.setup(algo=pearl,
env=env[0](),
sampler_cls=LocalSampler,
sampler_args=dict(max_path_length=max_path_length),
n_workers=1,
worker_class=PEARLWorker)
runner.train(n_epochs=num_epochs, batch_size=batch_size)
pearl_half_cheetah_vel()
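# CLI sketch (an assumption -- click parses sys.argv when the command object is
# invoked, so the experiment's knobs can be set from the shell):
#   python pearl_half_cheetah_vel.py --num_epochs 500 --encoder_hidden_size 200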
| [
"[email protected]"
] | |
684cc61e7ed14e6b451bd27e39aa48200081c2b3 | d7ec9d21fd426f4037908d4cd1b0532d3ea4288d | /simdb/settings.py | 21040d8caed58785045b8d33aebbfca021820f57 | [] | no_license | simmons-tech/simdb | 53fdbfeb999ad2f31a442d31b90205ac1529ead9 | be1938fb2139ee394dd9cfbcba8fd762b58c78d9 | refs/heads/master | 2020-04-22T10:06:28.846310 | 2016-10-13T03:36:51 | 2016-10-13T03:36:51 | 67,450,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,096 | py | """
Django settings for simdb project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from . import database
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv(
'DJANGO_SECRET_KEY',
'mh_!(!r7%^ysphmfhz2cmoshto3p#yjwa$!14&2r&pn#&9pi^w'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Libraries
'oidc_auth',
# Apps
'home',
'people',
'sdb',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTHENTICATION_BACKENDS = (
'oidc_auth.auth.OpenIDConnectBackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/oidc/login/'
LOGIN_REDIRECT_URL = '/'
OIDC_AUTH = {
'DEFAULT_PROVIDER': {
'issuer': 'https://oidc.mit.edu/',
'authorization_endpoint': 'https://oidc.mit.edu/authorize',
'token_endpoint': 'https://oidc.mit.edu/token',
'userinfo_endpoint': 'https://oidc.mit.edu/userinfo',
'jwks_uri': 'https://oidc.mit.edu/jwk',
'signing_alg': 'RS256',
'client_id': os.getenv('OIDC_CLIENT_ID'),
'client_secret': os.getenv('OIDC_CLIENT_SECRET'),
},
'SCOPES': ('openid', 'profile', 'email'),
}
ROOT_URLCONF = 'simdb.urlsdummy'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'simdb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': database.config(),
'sdb': database.config('SDB'),
}
DATABASE_ROUTERS = ['sdb.utils.SdbRouter']
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
| [
"[email protected]"
] | |
b36824f0be40160f9c5f18035f4391791f1f4e17 | 2f122071e3cee6a150392edee2e4a2b0770309bb | /src/apps/search/views.py | e68ce7f9bef692437b25cd0939469393610d31bc | [] | no_license | snicoper/ofervivienda | 4ca63472ca50406469977057f53b91a81c30f57f | 44b8d2934105ccbf02ff6c20896aa8c2b1746eaa | refs/heads/master | 2020-04-01T09:30:31.640803 | 2018-10-15T08:23:59 | 2018-10-15T08:23:59 | 153,077,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,490 | py | from django.shortcuts import Http404
from django.views.generic import FormView, ListView
from anuncios.mixins.views import ClassFromCategoryNameMixin
from anuncios.models import Anuncio
from anuncios.settings import ANUNCIO_PAGINATE_BY
from favorites.mixins.views import FavoriteListContextMixin
from utils.text import ucfirst
from .forms import BaseSearchForm, SearchAddressForm
class IndexSearchView(FormView):
"""Form de búsqueda en el home.
Cuando pulsa en buscar, lo procesa SearchFiltersView.
"""
template_name = 'search/index.html'
form_class = SearchAddressForm
class SearchFiltersView(FavoriteListContextMixin, ListView):
"""Form con filtros de busqueda."""
template_name = 'search/search_filters.html'
paginate_by = ANUNCIO_PAGINATE_BY
context_object_name = 'anuncio_list'
model = Anuncio
def dispatch(self, request, *args, **kwargs):
"""Probar que la categoría del URLConf existe.
Hay dos URLConf apuntan a la view, así que en caso de tener category,
se ha de probar que existe.
Si no hay category, muestra por defecto 'piso'.
"""
self.category = 'piso'
if self.kwargs.get('category'):
self.category = self.kwargs.get('category')
if not hasattr(self.model, self.category.upper()):
raise Http404
return super().dispatch(request, *args, **kwargs)
def get_form(self):
"""Obtener el form en base a self.category."""
form = BaseSearchForm.get_form_class(self.category)(self.request.GET or None)
form.initial['category'] = self.category.upper()
return form
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form'] = self.get_form()
context['category_name'] = ucfirst(self.category)
return context
def get_queryset(self, **kwargs):
"""Queryset para búsquedas del formulario con filtros."""
form = self.get_form()
queryset = self.model.objects.none()
if form.is_valid() and form.cleaned_data.get('q'):
category = form.cleaned_data.get('category').upper()
type_anuncio = form.cleaned_data.get('type_anuncio')
metros_cuadrados = form.cleaned_data.get('metros_cuadrados', 0)
habitaciones = form.cleaned_data.get('habitaciones', 0)
banos = form.cleaned_data.get('banos', 0)
precio = form.cleaned_data.get('precio', 0)
genero = form.cleaned_data.get('genero', None)
fumar_piso = form.cleaned_data.get('permite_fumar_piso', False)
fumar_habitacion = form.cleaned_data.get('permite_fumar_habitacion', False)
internet = form.cleaned_data.get('internet', False)
address = form.cleaned_data.get('q')
queryset = Anuncio.get_model_class(category).objects.published(
location_string__search=address
)
if queryset.exists():
                # Filters applied to the search.
if type_anuncio:
queryset = queryset.filter(type_anuncio=type_anuncio)
if habitaciones and habitaciones > 0:
queryset = queryset.filter(habitaciones__gte=habitaciones)
if banos and banos > 0:
queryset = queryset.filter(banos__gte=banos)
if metros_cuadrados and metros_cuadrados > 0:
queryset = queryset.filter(metros_cuadrados__gte=metros_cuadrados)
if genero:
queryset = queryset.filter(genero=genero)
if fumar_piso:
queryset = queryset.filter(permite_fumar_piso=fumar_piso)
if fumar_habitacion:
queryset = queryset.filter(permite_fumar_habitacion=fumar_habitacion)
if internet:
queryset = queryset.filter(internet=internet)
if precio and precio > 0:
queryset = queryset.filter(precio__lte=precio)
return queryset.select_related('owner').prefetch_related('image_anuncio')
class SearchMapView(ClassFromCategoryNameMixin, FormView):
"""Form avanzado de búsquedas, muestra mapa de gmaps."""
template_name = 'search/search_map.html'
form_class = None
model = Anuncio
def get(self, request, *args, **kwargs):
"""Comprueba que exista la categoría en el URLConf."""
if not self.category or not hasattr(Anuncio, self.category):
raise Http404
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
category_list = {k: v for k, v in Anuncio.CATEGORY_CHOICES}
context['title'] = category_list.get(self.category)
context['category_name'] = self.category
return context
def get_form(self):
"""Obtener el form según category."""
form_class = BaseSearchForm.get_form_class(self.category)
initial = {'category': self.category}
if self.request.user.is_authenticated and self.request.user.user_location:
initial['latitude'] = self.request.user.user_location.latitude
initial['longitude'] = self.request.user.user_location.longitude
form = form_class(self.request.GET or None, initial=initial)
        # Remove the q field
del form.fields['q']
return form
| [
"[email protected]"
] | |
a8c9730f594067c302297e85bf27f363f28b7440 | 220535ba153c2fca757c264cab56d2ff835f4fd4 | /02_K-近邻算法KNN/KNN.py | 12d160bc82b9aaca1311f5b885db5491a719ccfc | [] | no_license | cp4011/Machine-Learning | d41b5b8533b8d2210d1e69b944f8ea997c2488b4 | 7f4576278990f8a2de302e69eb6373d169f9fbc8 | refs/heads/master | 2020-04-04T15:41:53.912450 | 2019-03-26T15:37:35 | 2019-03-26T15:37:35 | 156,048,459 | 2 | 0 | null | 2018-11-04T06:14:57 | 2018-11-04T04:16:03 | Python | UTF-8 | Python | false | false | 9,066 | py | from numpy import *
from os import listdir
"""k-近邻算法
优点:精度高、对异常值不敏感、无数据输入假定。
缺点:计算复杂度高、空间复杂度高。
使用数据范围:数值型和标称型。
伪代码:
对未知类别属性的数据集中的每个点依次执行以下操作:
(1).计算已知类别数据集中的点与当前点之间的距离;
(2).按照距离递增次序排序;
(3).选取与当前点距离最小的k个点;
(4).确定前k个点所在类别的出现频率;
(5).返回前k个点出现频率最高的类别作为当前点的预测分类。
"""
# k-近邻算法
def classify0(inputX, dataSet, labels, k):
"""
Function: 创建数据集和标签
Args: inputX:用于分类的输入向量 (1xN)
dataSet:输入的训练样本集 (MxN)
labels:标签向量 (1xM vector)
k:用于比较的近邻数量 (should be an odd number奇数)
Returns: sortedClassCount[0][0]:分类结果
"""
# dataSet.shape[0]:求dataSet矩阵的行数;dataSet.shape[1]:求dataSet矩阵的列数; dataSet.shape:元组形式输出矩阵行数、列数
dataSetSize = dataSet.shape[0]
# tile(A, B):将A重复B次,其中B可以是int类型也可以是元组类型;这句话相当于向量inputX与矩阵dataSet里面的每组数据做差
diffMat = tile(inputX, (dataSetSize, 1)) - dataSet # numpy.tile()复制
# sqDiffMat.sum(axis=0):对矩阵的每一列求和
# sqDiffMat.sum(axis=1):对矩阵的每一行求和
# sqDiffMat.sum():对整个矩阵求和
distances = (diffMat**2).sum(axis=1)**0.5 # 计算欧式距离((x1-x2)^2 + (y1-y2)^2)^0.5
sortedDistances = distances.argsort() # 返回从小到大排序的索引 # 将矩阵的每一行向量相加: sum(a,axis=1)或a.sum(axis=1)
classCount = {}
for i in range(k): # 给字典赋值
y = labels[sortedDistances[i]] # 字典的key
classCount[y] = classCount.get(y, 0) + 1 # dict.get(key, default=none)
result = sorted(classCount.items(), key=lambda x: x[1], reverse=True) # d.items()返回的是一个列表,如[('a',74), ('b',90)]
# 返回可遍历的(键, 值) 元组数组,如[('Google', 3), ('taobao', 2), ('Runoob', 1)]
return result[0][0] # 错误return result[0],这是返回的tuple
# example dataset
def createDataSet():
group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
labels = ['A', 'A', 'B', 'B']
return group, labels
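# quick check (illustrative, not in the original):
#   group, labels = createDataSet()
#   classify0([0, 0], group, labels, 3)   # -> 'B'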
# prepare the data: parse records from a text file
def file2matrix(filename):
f = open(filename)
arrayOLines = f.readlines()
numberOfLines = len(arrayOLines)
    returnMat = zeros((numberOfLines, 3))       # NumPy feature matrix to return
    classLabelVector = []                       # label list to return
index = 0
for line in arrayOLines:
        line = line.strip()                     # strip surrounding newline characters and spaces
        listFromLine = line.split('\t')         # split the whole line into a list on tab characters
        returnMat[index] = listFromLine[0:3]    # keep the first three elements as the features
        classLabelVector.append(int(listFromLine[-1]))      # wrong would be: classLabelVector = listFromLine[-1]
index += 1
    return returnMat, classLabelVector          # return the training matrix and the class label vector
# normalize feature values: newValue = (oldValue - min)/(max - min)
def autoNorm(dataSet):
    # take the per-column minima
    minValues = dataSet.min(0)          # the 0 makes min run over columns, giving a 1 x 3 vector
maxVelues = dataSet.max(0)
ranges = maxVelues - minValues
m = len(dataSet)
    minValueMatrix = tile(minValues, (m, 1))        # tile the 1x3 vector minValues into m rows
rangesMatrix = tile(ranges, (m, 1))
    returnNorm = zeros(shape(dataSet))              # create the output matrix normDataSet
returnNorm = dataSet - minValueMatrix
    normDataSet = returnNorm / rangesMatrix         # note: in NumPy, true matrix "division" is linalg.solve(matA, matB)
    # return the normalized matrix, the range vector and the minima vector
    return normDataSet, ranges, minValues       # normDataSet alone would suffice; the other two help normalize inputX later
# test harness: the classifier against the dating-site data
def datingClassTest():
    ratio = 0.1         # use 10% of the data to test the classifier
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)      # normalize the feature values
m = normMat.shape[0]
    num = int(m * ratio)            # number of test vectors
errorCount = 0.0
    for i in range(num):            # classify the test set, returning and printing each result
        # wrong would be: classifierResult = classify0(normMat[i, :], normMat[num, :], datingLabels[num, :], 3)
        # feed the classifier; only the first argument (the test sample) changes between iterations
classifierResult = classify0(normMat[i, :], normMat[num:m, :], datingLabels[num:m], 3)
print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
if classifierResult != datingLabels[i]:
errorCount += 1.0
print("the errorCount is %d" % errorCount)
print("the total error rate is: %f" % (errorCount / float(num))) # 注意是%f, 而%d:0.066就输出为0
# KNN预测约会网站某人的喜欢程度
def classifyPerson():
resultList = ['not at all', 'in small doses', 'in large doses']
percentTats = float(input("percentage of time spent playing video games?\n"))
ffMiles = float(input("frequent flier miles earned per year?\n"))
iceCream = float(input("liters of ice cream consumed per year?\n"))
    inArr = array([percentTats, ffMiles, iceCream])     # note the surrounding []
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
normMat, ranges, minValues = autoNorm(datingDataMat)
    result = classify0((inArr-minValues)/ranges, normMat, datingLabels, 3)      # classify, normalizing inArr first
    print("You will probably like this person:", resultList[result - 1])       # note the - 1
# test
classifyPerson()
# kNN on the handwritten digit dataset (each 32*32 image flattened into a 1*1024 vector)
def img2vector(filename):
    returnVect = zeros((1, 1024))       # note the double parentheses
f = open(filename)
    for i in range(32):                 # two nested loops over the 32*32 grid
lineStr = f.readline()
for j in range(32):
            returnVect[0, 32*i+j] = int(lineStr[j])     # store into the 1*1024 NumPy array; note the int() and the 0 in [0, 32*i+j]
    return returnVect                   # return the 1*1024 output vector
def handWritingClassTest():
    trainingFileList = listdir('trainingDigits')        # os.listdir() returns the names inside a directory (alphabetical order)
    m = len(trainingFileList)                           # number of training files
    trainingMat = zeros((m, 1024))                      # initialize the training matrix
    hwLabels = []
    for i in range(m):                                  # load the training set
        fileNameStr = trainingFileList[i]               # take each file name in turn
        trainingMat[i, :] = img2vector('trainingDigits/%s' % fileNameStr)
        a = fileNameStr.split('.')[0]                   # parse the class digit out of the file name
        b = int(a.split('_')[0])                        # remember to int() the string
        hwLabels.append(b)                              # store the parsed class digit as the label
testFileList = listdir('testDigits')
mTest = len(testFileList)
errorCount = 0.0
for i in range(mTest):
fileNameStr = testFileList[i]
vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)
        a = fileNameStr.split('.')[0]                   # split, as in the training loop (the original's strip only worked for single-digit labels)
        b = int(a.split('_')[0])
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)        # feed the classifier
# print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, b))
if classifierResult != b:
errorCount += 1.0
print("the total number of errors is: %d" % errorCount)
print("the total error rate is: %f" % (errorCount / float(mTest)))
if __name__ == "__main__":
handWritingClassTest()
| [
"[email protected]"
] | |
afc12e1e19a07dc5ca5449d3f2a3421f0d611459 | 33e4a68cfb7b37b2a75afab0867be0004d569daa | /landcarve/constants.py | f83b743aa5731f96329ce3a1232ed8863c4205ed | [] | no_license | phillipvan/landcarve | ac4976adbe6c8ddae391d87d203bcf0cb49a0a07 | ab717c63255d3568ad00517bd6fe08a23502da85 | refs/heads/master | 2023-08-15T02:41:04.175401 | 2021-09-29T22:43:37 | 2021-09-29T22:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15 | py | NODATA = -1000
| [
"[email protected]"
] | |
4aea3afea6a9199732f404cf7c84b91ca28615e2 | 0ed050cd8d23bf4679860694bbc5d6f2e45f229a | /src/q13es/tests.py | 0056b741b65be1b6294b2fc08682a819429d57ee | [] | no_license | dsysme/hackforisrael | 5404563d57f0034b1cefe09586abb6d65f8d5442 | 6081533fb8fbdecf28b4a4d85b7e9822d8969ca4 | refs/heads/master | 2020-12-13T20:54:55.744381 | 2013-11-03T23:34:07 | 2013-11-03T23:34:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,672 | py | from django import forms
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils.translation import gettext as _
from q13es.forms import create_form, split_form_file, parse_field, parse_form
from q13es.models import Answer
import os.path
User = get_user_model()
class Q13esTest(TestCase):
# CHOICES1 = (
# (1, 'One'),
# (2, 'Two'),
# (3, 'Three'),
# )
#
# def setUp(self):
# self.u = User.objects.create_user("foobar")
def test_split_form_file(self):
s = "Shalom!\n[abc]\nfoo\n[def?]\nbar"
result = split_form_file(s)
self.assertEquals(
('Shalom!', [('abc', True, 'foo'), ('def', False, 'bar')]),
result)
def test_split_form_file_no_fields(self):
s = "Shalom!"
result = split_form_file(s)
self.assertEquals(('Shalom!', []), result)
def test_split_form_file_empty(self):
s = ""
result = split_form_file(s)
self.assertEquals(('', []), result)
#
#
def test_parse_field_default(self):
s = "foo"
result = parse_field(s)
self.assertEquals((None, {'label': 'foo', 'help_text': ''}), result)
def test_parse_field_text(self):
s = "bar\n\ntext"
result = parse_field(s)
self.assertEquals(('text', {'label': 'bar', 'help_text': ''}), result)
def test_parse_field_text_with_help(self):
s = "bar\n\ntext\n\ncontent\n123\n\nfoo\n\nbar"
result = parse_field(s)
self.assertEquals(('text', {'help_text': 'content 123\nfoo\nbar', 'label': 'bar'}), result)
def test_parse_field(self):
s = """
What is your favourite color?
radio:
* red
* green
* blue
Please choose your
favourite color.
You can choose only one
"""
result = parse_field(s)
expected = 'radio', {
'label': 'What is your favourite color?',
'help_text': 'Please choose your favourite color.\nYou can choose only one',
'choices': [
('red', 'red'),
('green', 'green'),
('blue', 'blue')],
}
self.assertEquals(expected, result)
# def test_build_field(self):
#
# args = {
# 'label': 'What is your favourite color?',
# 'help_text': 'Please choose your favourite color.\nYou can choose only one',
# 'choices': [
# ('red', 'red'),
# ('green', 'green'),
# ('blue', 'blue')],
# }
#
# result = build_field(_('radio'), args)
# self.assertIsInstance(result, forms.ChoiceField)
# self.assertIsInstance(result.widget, forms.RadioSelect)
# self.assertEquals(args['label'], result.label)
# self.assertEquals(args['help_text'], result.help_text)
# self.assertEquals(args['choices'], result.choices)
#
# def test_build_field_simple(self):
#
# result = build_field(None, {'label': 'Foo'})
# self.assertIsInstance(result, forms.CharField)
# self.assertEquals('Foo', result.label)
# def test_simple_q13e(self):
# """
# Tests that a form can be saved in an answer
# """
# class BasicForm(forms.Form):
# title = forms.CharField()
# notes = forms.CharField(widget=forms.Textarea)
# optional_charfield = forms.CharField(required=False)
# vote = forms.IntegerField(min_value=1, max_value=5)
# optional_intfield = forms.IntegerField(required=False)
# # choices = forms.MultipleChoiceField(choices=self.CHOICES1)
#
# data = {
# 'title': 'a\nb\nc',
# 'notes': 'a\nb\nc',
# 'vote': '1',
# 'choices': '12',
# 'foo': 'bar',
# 'foo': 'bar',
# }
#
# f = BasicForm(data)
# self.assertTrue(f.is_valid())
#
# # print f.cleaned_data
#
# a = Answer.objects.create(q13e_slug='basic', user=self.u, data=f.cleaned_data)
#
# self.assertEquals(1, len(self.u.answers.all()))
#
# self.assertEquals(['basic'], [o.q13e_slug for o in self.u.answers.all()])
def test_create_form(self):
"""
Tests that a form can be created from field definitions
"""
info = (
('title', (forms.CharField, {})),
('description', (forms.CharField, {
'widget': forms.Textarea,
})),
('flavour', (forms.ChoiceField, {
'widget': forms.RadioSelect,
'choices': (
(1, "A"),
(2, "B"),
)
})),
)
form_class = create_form(info)
self.assertIn(forms.BaseForm, form_class.__bases__)
form = form_class({})
self.assertEquals(3, len(form.fields))
self.assertEquals(3, len(form.errors))
form = form_class({
'title': ':-)',
'description': 'foo',
'flavour': '3'
})
self.assertEquals(1, len(form.errors))
s = forms.ChoiceField.default_error_messages['invalid_choice'] % {'value': '3'}
self.assertEquals(s.decode('utf8'), form.errors['flavour'][0])
form = form_class({
'title': ':-)',
'description': 'foo',
'flavour': '1'
})
self.assertEquals(0, len(form.errors))
self.assertEquals(form.cleaned_data, {
'title': ':-)',
'description': 'foo',
'flavour': '1'
})
def test_parse_form(self):
with open(os.path.join(os.path.dirname(__file__), 'test_form.txt')) as f:
text = f.read()
form_class = parse_form(text)
self.assertIn(forms.BaseForm, form_class.__bases__)
form = form_class()
self.assertEquals(4, len(form.fields))
| [
"[email protected]"
] | |
8cdbd7afe50b400f3a05da8118e35216bc8e369c | fa0d15b56a0d2bfd33a3bc5ca48c02d9b389af3d | /ggrc-core/src/ggrc/app.py | 794c4eb71bb29963b8b85425288d1423f465950a | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Bryan-Guner-Backup/WULF | 5f50ca4069f267e2ce0b1b6537f340ead5d78984 | 59d70ce2d4fe148e78ed112746d6963eeccaa6e9 | refs/heads/master | 2023-08-22T13:15:51.457279 | 2021-10-29T08:16:03 | 2021-10-29T08:16:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,667 | py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Sets up Flask app."""
import re
from flask import Flask
from flask.ext.sqlalchemy import get_debug_queries
from flask.ext.sqlalchemy import SQLAlchemy
from tabulate import tabulate
from ggrc import contributions # noqa: imported so it can be used with getattr
from ggrc import db
from ggrc import extensions
from ggrc import notifications
from ggrc import settings
app = Flask('ggrc', instance_relative_config=True) # noqa: valid constant name
app.config.from_object(settings)
if "public_config" not in app.config:
app.config.public_config = {}
for key in settings.exports:
app.config.public_config[key] = app.config[key]
# Configure Flask-SQLAlchemy for app
db.app = app
db.init_app(app)
@app.before_request
def _ensure_session_teardown():
"""Ensure db.session is correctly removed
Occasionally requests are terminated without calling the teardown methods,
namely with DeadlineExceededError on App Engine.
"""
if db.session.registry.has():
db.session.remove()
def init_models(app_):
import ggrc.models
ggrc.models.init_app(app_)
def configure_flask_login(app_):
import ggrc.login
ggrc.login.init_app(app_)
def configure_webassets(app_):
"""Add basic webassets configuration."""
from ggrc import assets
app_.jinja_env.add_extension('webassets.ext.jinja2.assets')
app_.jinja_env.assets_environment = assets.environment
def configure_jinja(app_):
"""Add basic jinja configuration."""
app_.jinja_env.add_extension('jinja2.ext.autoescape')
app_.jinja_env.autoescape = True
app_.jinja_env.add_extension('jinja2.ext.with_')
app_.jinja_env.add_extension('hamlpy.ext.HamlPyExtension')
def init_services(app_):
import ggrc.services
ggrc.services.init_all_services(app_)
def init_views(app_):
import ggrc.views
ggrc.views.init_all_views(app_)
def init_extension_blueprints(app_):
for extension_module in extensions.get_extension_modules():
if hasattr(extension_module, 'blueprint'):
app_.register_blueprint(extension_module.blueprint)
def init_indexer():
import ggrc.fulltext
ggrc.indexer = ggrc.fulltext.get_indexer()
def init_permissions_provider():
from ggrc.rbac import permissions
permissions.get_permissions_provider()
def init_extra_listeners():
from ggrc.automapper import register_automapping_listeners
register_automapping_listeners()
def _enable_debug_toolbar():
"""Enable flask debug toolbar for benchmarking requests."""
if getattr(settings, "FLASK_DEBUGTOOLBAR", False):
from flask_debugtoolbar import DebugToolbarExtension
DebugToolbarExtension(app)
def _enable_jasmine():
"""Set jasmine sources and specs if it's enabled.
Jasmine is used for javascript tests and is not installed on the production
environment, that is why we must check if it enabled before tying to import
it.
"""
if getattr(settings, "ENABLE_JASMINE", False):
from flask.ext.jasmine import Asset
from flask.ext.jasmine import Jasmine
# Configure Flask-Jasmine, for dev mode unit testing
jasmine = Jasmine(app)
jasmine.sources(
Asset("dashboard-js"),
Asset("dashboard-js-spec-helpers"),
Asset("dashboard-js-templates"))
jasmine.specs(
Asset("dashboard-js-specs"))
def _display_sql_queries():
"""Set up display database queries
This function makes sure we display the sql queries if the record setting is
enabled.
"""
report_type = getattr(settings, "SQLALCHEMY_RECORD_QUERIES", False)
valid_types = ('count', 'slow', 'all')
if report_type:
if report_type not in valid_types:
raise Exception("""Invalid SQLALCHEMY_RECORD_QUERIES value specified.
Possible options: {}""".format(', '.join(valid_types)))
# pylint: disable=unused-variable
@app.after_request
def display_queries(response):
"""Display database queries
Prints out SQL queries, EXPLAINs for queries above slow_threshold, and
a final count of queries after every HTTP request
"""
slow_threshold = 0.5 # EXPLAIN queries that ran for more than 0.5s
queries = get_debug_queries()
app.logger.info("Total queries: %s", len(queries))
if report_type == 'count':
return response
# We have to copy the queries list below otherwise queries executed
# in the for loop will be appended causing an endless loop
for query in queries[:]:
if report_type == 'slow' and query.duration < slow_threshold:
continue
app.logger.info(
"%.8f %s\n%s\n%s",
query.duration,
query.context,
query.statement,
query.parameters)
is_select = bool(re.match('SELECT', query.statement, re.I))
if query.duration > slow_threshold and is_select:
try:
statement = "EXPLAIN " + query.statement
engine = SQLAlchemy().get_engine(app)
result = engine.execute(statement, query.parameters)
app.logger.info(tabulate(result.fetchall(), headers=result.keys()))
except Exception as err: # pylint: disable=broad-except
app.logger.warning("Statement failed: %s", statement)
app.logger.exception(err)
return response
init_models(app)
configure_flask_login(app)
configure_webassets(app)
configure_jinja(app)
init_services(app)
init_views(app)
init_extension_blueprints(app)
init_indexer()
init_permissions_provider()
init_extra_listeners()
notifications.register_notification_listeners()
_enable_debug_toolbar()
_enable_jasmine()
_display_sql_queries()
| [
"[email protected]"
] | |
1da2545df8d545a097b32fdc32594028de41246c | d8346eaf1c910ff02c7b243692a2766b8b089f06 | /for-post/python-standard-library-threading/s2-timer-loop/loop.py | fb38dc289d93392d4877804a4926fc8e4cb27eda | [] | no_license | dustinpfister/examples-python | 55304c99ba3af82cd8784ee98745546632155c68 | a9910ee05d4df524f951f61b6d9778531a58ccbf | refs/heads/master | 2023-03-06T21:10:18.888654 | 2021-02-26T20:32:52 | 2021-02-26T20:32:52 | 318,595,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import threading
def printMess():
    print('hello')
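# loop() below re-arms a threading.Timer each time it fires, so func keeps
# running every `sec` seconds until the process exits.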
def loop(func, sec):
def wrapper():
loop(func, sec)
func()
t = threading.Timer(sec, wrapper)
t.start()
return t
t = loop(printMess, 1) | [
"[email protected]"
] | |
915ce9e4b26a80de0c7696c731b048a00f6e8272 | adbcfc92d71e9be39ccef49b3aee0d06cdcb0af1 | /alembic/versions/48e2185b4a81_added_foreign_key_to_the_line_stops.py | 50b33102a1ea440040fdc9b085a9132fe3c8f2b6 | [] | no_license | linxaddict/mpkscraper | 67e5cf5806974d4fe78f1c892f12870008951a91 | a3b48a16f5db448692751c7de1f443dc3b0b30e7 | refs/heads/master | 2020-06-15T03:33:19.935314 | 2016-12-17T21:07:20 | 2016-12-17T21:07:20 | 75,334,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | """added foreign key to the line_stops
Revision ID: 48e2185b4a81
Revises: 44516a7495c7
Create Date: 2016-12-17 21:50:48.779742
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '48e2185b4a81'
down_revision = '44516a7495c7'
branch_labels = None
depends_on = None
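# upgrade() adds the FK fk_stop_line_stop (line_stops.line_id -> stops.id);
# downgrade() must drop it from the same table it was created on.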
def upgrade():
op.create_foreign_key('fk_stop_line_stop', 'line_stops', 'stops', ['line_id'], ['id'])
def downgrade():
    op.drop_constraint('fk_stop_line_stop', 'line_stops', type_='foreignkey')
| [
"[email protected]"
] | |
75809450259f12afa6cc106612dff03bb1da556b | 6958566f07393cc0dbdc3d8cefdcc741153f721b | /flask0518/utils/settings.py | 678992a18fc5ba456baa786cf55a1bc20675649f | [] | no_license | iversongit/20180518 | 6ffeb7f91c7b66f07c9f4f0df7616accec473ba1 | 9ae2fb839815155c4784936a8c099cc98ebc15cf | refs/heads/master | 2020-03-17T19:55:33.897986 | 2018-05-19T02:11:41 | 2018-05-19T02:11:41 | 133,884,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | import os
from utils.functions import get_db_url
# Holds all configuration-related settings, similar to Django's settings.py
# Base directory
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Page templates directory
templates_dir = os.path.join(BASE_DIR,"templates")
# Static files directory
static_dir = os.path.join(BASE_DIR,"static")
# Database connection settings
DATABASE = {
    # User
    'USER':'root',
    # Password
    'PASSWORD':'5201314',
    # Port
    'PORT':'3306',
    # Host address, e.g. 127.0.0.1
    'HOST':'127.0.0.1',
    # Database dialect
    'DB':'mysql',
    # Driver
    # pymysql is the driver -- Python 3 has no MySQLdb, so pymysql serves as
    # the bridge for talking to MySQL
    'DRIVER':'pymysql',
    # Database name
    'NAME': 'flask_20180518'
}
# Build the database connection URI
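# get_db_url presumably assembles a SQLAlchemy URI from the dict above,
# e.g. 'mysql+pymysql://root:<password>@127.0.0.1:3306/flask_20180518'.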
SQLALCHEMY_DATABASE_URI = get_db_url(DATABASE) | [
"[email protected]"
] | |
fde93cf368188861818c16d04c7084c0c3f7a84a | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/fizz_20200607124449.py | 500600f0a3fd3de68b394839af6489449e8e04aa | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | def fizz(num):
newNumber = []
for i in range(1,num+1):
newNumber.append(i)
for j in range(len(newNumber)):
        # Check the combined 3-and-5 case first, otherwise the %3 branch shadows it.
        if newNumber[j] % 3 == 0 and newNumber[j] % 5 == 0:
            newNumber[j] = "FizzBuzz"
        elif newNumber[j] % 3 == 0:
            newNumber[j] = "Fizz"
        elif newNumber[j] % 5 == 0:
            newNumber[j] = "Buzz"
print(newNumber)
fizz(8) | [
"[email protected]"
] | |
4965dbda8f463902986b1c3f3580791ed62f75cd | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-1/vse-naloge-brez-testov/DN4-M-96.py | e73ef8577fed38746d0fd9c7193bff1d7ac9d17f | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | # Write your functions here:
from math import *
def koordinate(ime, kraji):
    # Return the (x, y) coordinates of the place named `ime`, or None if absent.
    for i, x, y in kraji:
        if ime == i:
            return (x, y)
    return None
def razdalja_koordinat(x1, y1, x2, y2):
razdalja = sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
return razdalja
def razdalja(ime1, ime2, kraji):
xprva, yprva = koordinate(ime1, kraji)
xdruga, ydruga = koordinate(ime2, kraji)
return razdalja_koordinat(xprva, yprva, xdruga, ydruga)
def v_dometu(ime, domet, kraji):
z = []
for imeTest, xTest, yTest in kraji:
if(ime != imeTest):
if razdalja(ime, imeTest, kraji) <= domet:
z.append(imeTest)
return z
def najbolj_oddaljeni(ime, imena, kraji):
najvecjaTest = 0
for ime1 in imena:
razTest = razdalja(ime, ime1, kraji)
if(razTest > najvecjaTest):
najvecjaTest = razTest
izpis = ime1
return izpis
def zalijemo(ime, domet, kraji):
imena = v_dometu(ime, domet, kraji)
return najbolj_oddaljeni(ime, imena, kraji)
def presek(s1, s2):
u = []
for a in s1:
for b in s2:
if a == b:
u.append(a)
return u
def skupno_zalivanje(ime1, ime2, domet, kraji):
c = v_dometu(ime1, domet, kraji)
d = v_dometu(ime2, domet, kraji)
e = presek(c, d)
return e
| [
"[email protected]"
] | |
f6f0b4e89f6ced5ee2a6039dd64dee6a3c957630 | 6a2b1b1d6092a8d2492a6677b6fd19d27b0f461f | /14-Python-Advance/06-python-regex/04-Match-Object/02-Match-Attributes/02-Match-pos-endpos-attribute.py | f9cd514798dea81ea6dd5771c3af7ef8e2ded8ef | [] | no_license | Uttam1982/PythonTutorial | 3cfbe237199e048967502f3d0c1936f2b878cb87 | 8e28cc5c4be5826a011059db66f6952871248c82 | refs/heads/master | 2022-12-17T18:47:28.397383 | 2020-09-22T08:55:23 | 2020-09-22T08:55:23 | 288,524,784 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | # match.pos
# match.endpos
#--------------------------------------------------------------------------------------------
# Contain the effective values of <pos> and <endpos> for the search.
# Remember that some methods, when invoked on a compiled regex,
# accept optional <pos> and <endpos> arguments that limit the search
# to a portion of the specified search string. These values are accessible
# from the match object with the .pos and .endpos attributes
#--------------------------------------------------------------------------------------------
import re
re_obj = re.compile(r'\d+')
m = re_obj.search('foo123bar', 2, 7)
# Output : <re.Match object; span=(3, 6), match='123'>
print(m)
# Output : 2,7
print(m.pos, m.endpos)
# If the <pos> and <endpos> arguments aren’t included in the call,
# either because they were omitted or because the function in question
# doesn’t accept them, then the .pos and .endpos attributes effectively
# indicate the start and end of the string:
#--------------------------------------------------------------------------------------------
re_obj = re.compile(r'\d+')
m = re_obj.search('foo123bar')
# Output : <re.Match object; span=(3, 6), match='123'>
print(m)
# Output : 0,9
print(m.pos, m.endpos)
#-------------------------------------------------------------------------------------------- | [
"[email protected]"
] | |
de72767d2ff972eb773a57d625072fad3a34062a | fb605733922e6e65c3a8537e0eddca18a98bd161 | /bin/sql-to-markdown | bb2563e15394de6c4e827ed47f792398a8323fff | [] | no_license | EarthCubeGeochron/Sparrow-docs | 5f3c73851d8a0a4ddb3b019c81b7e6066d3988a9 | ad6ad2e337e98170fb33a5384640ff01c9462f03 | refs/heads/master | 2020-04-29T07:02:42.555203 | 2019-04-18T22:25:32 | 2019-04-18T22:25:32 | 175,939,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import fileinput
import pyparsing
from textwrap import dedent
print("~~~sql")
in_doc = False
docs = ""
for line in fileinput.input():
docstart = "/*" in line
docend = "*/" in line
if docstart:
in_doc = True
if not in_doc:
print(line, end="")
continue
if in_doc:
docs += line
if docend:
in_doc = False
if not in_doc and docs != "":
docs = (dedent(docs)
.replace("/*","\n~~~\n\n")
.replace("*/","\n\n~~~sql\n"))
print(dedent(docs), end="")
docs = ""
print("~~~")
| [
"[email protected]"
] | ||
2c3d5b4e9124467eb3bf4af0a2652746057a9351 | 45ba55b4fbdaf1657fde92beaeba4f173265afcd | /tests/b.py | f646816b0c9a3e696aef98558f548199055989fd | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | strawberry-graphql/strawberry | af96afd4edd1788c59e150597a12501fbc7bf444 | 6d86d1c08c1244e00535840d9d87925431bc6a1c | refs/heads/main | 2023-08-30T03:34:12.929874 | 2023-08-24T12:01:09 | 2023-08-24T12:01:09 | 162,690,887 | 3,408 | 529 | MIT | 2023-09-14T21:49:44 | 2018-12-21T08:56:55 | Python | UTF-8 | Python | false | false | 779 | py | from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from typing_extensions import Annotated
import strawberry
if TYPE_CHECKING:
from tests.a import A
@strawberry.type
class B:
id: strawberry.ID
@strawberry.field
async def a(self) -> Annotated[A, strawberry.lazy("tests.a"), object()]:
from tests.a import A
return A(id=self.id)
@strawberry.field
async def optional_a(
self,
) -> Annotated[A, strawberry.lazy("tests.a"), object()] | None:
from tests.a import A
return A(id=self.id)
@strawberry.field
async def optional_a2(
self,
) -> Optional[Annotated[A, strawberry.lazy("tests.a"), object()]]:
from tests.a import A
return A(id=self.id)
| [
"[email protected]"
] | |
43e50a203f2e925342a83dcf7976dde81970e1e1 | bc441bb06b8948288f110af63feda4e798f30225 | /ucpro_sdk/model/notify/operation_log_pb2.pyi | fc93c0734ad12ed730d7c1a374e421a9a3cbc2ca | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,962 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from google.protobuf.struct_pb2 import (
Struct as google___protobuf___struct_pb2___Struct,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
from ucpro_sdk.model.notify.app_pb2 import (
App as ucpro_sdk___model___notify___app_pb2___App,
)
from ucpro_sdk.model.notify.deploy_info_pb2 import (
DeployInfo as ucpro_sdk___model___notify___deploy_info_pb2___DeployInfo,
)
from ucpro_sdk.model.notify.device_pb2 import (
Device as ucpro_sdk___model___notify___device_pb2___Device,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class OperationLog(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
system = ... # type: typing___Text
topic = ... # type: typing___Text
event_id = ... # type: typing___Text
parent_event_id = ... # type: typing___Text
event = ... # type: typing___Text
status = ... # type: typing___Text
operator = ... # type: typing___Text
target_name = ... # type: typing___Text
target_id = ... # type: typing___Text
target_category = ... # type: typing___Text
notifiers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
trigger = ... # type: typing___Text
memo = ... # type: typing___Text
app_id = ... # type: typing___Text
cluster_id = ... # type: typing___Text
package_id = ... # type: typing___Text
package_name = ... # type: typing___Text
version_id = ... # type: typing___Text
version_name = ... # type: typing___Text
content = ... # type: typing___Text
data_name = ... # type: typing___Text
ip = ... # type: typing___Text
ip_list = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
subject = ... # type: typing___Text
mtime = ... # type: builtin___int
ctime = ... # type: builtin___int
@property
def parent_event(self) -> OperationLog: ...
@property
def device_list(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ucpro_sdk___model___notify___device_pb2___Device]: ...
@property
def app_list(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ucpro_sdk___model___notify___app_pb2___App]: ...
@property
def ext_info(self) -> google___protobuf___struct_pb2___Struct: ...
@property
def deploy_info(self) -> ucpro_sdk___model___notify___deploy_info_pb2___DeployInfo: ...
def __init__(self,
*,
system : typing___Optional[typing___Text] = None,
topic : typing___Optional[typing___Text] = None,
event_id : typing___Optional[typing___Text] = None,
parent_event_id : typing___Optional[typing___Text] = None,
parent_event : typing___Optional[OperationLog] = None,
event : typing___Optional[typing___Text] = None,
status : typing___Optional[typing___Text] = None,
device_list : typing___Optional[typing___Iterable[ucpro_sdk___model___notify___device_pb2___Device]] = None,
operator : typing___Optional[typing___Text] = None,
target_name : typing___Optional[typing___Text] = None,
target_id : typing___Optional[typing___Text] = None,
target_category : typing___Optional[typing___Text] = None,
app_list : typing___Optional[typing___Iterable[ucpro_sdk___model___notify___app_pb2___App]] = None,
ext_info : typing___Optional[google___protobuf___struct_pb2___Struct] = None,
notifiers : typing___Optional[typing___Iterable[typing___Text]] = None,
trigger : typing___Optional[typing___Text] = None,
memo : typing___Optional[typing___Text] = None,
app_id : typing___Optional[typing___Text] = None,
cluster_id : typing___Optional[typing___Text] = None,
package_id : typing___Optional[typing___Text] = None,
package_name : typing___Optional[typing___Text] = None,
version_id : typing___Optional[typing___Text] = None,
version_name : typing___Optional[typing___Text] = None,
deploy_info : typing___Optional[ucpro_sdk___model___notify___deploy_info_pb2___DeployInfo] = None,
content : typing___Optional[typing___Text] = None,
data_name : typing___Optional[typing___Text] = None,
ip : typing___Optional[typing___Text] = None,
ip_list : typing___Optional[typing___Iterable[typing___Text]] = None,
subject : typing___Optional[typing___Text] = None,
mtime : typing___Optional[builtin___int] = None,
ctime : typing___Optional[builtin___int] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> OperationLog: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> OperationLog: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"deploy_info",b"deploy_info",u"ext_info",b"ext_info",u"parent_event",b"parent_event"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"app_id",b"app_id",u"app_list",b"app_list",u"cluster_id",b"cluster_id",u"content",b"content",u"ctime",b"ctime",u"data_name",b"data_name",u"deploy_info",b"deploy_info",u"device_list",b"device_list",u"event",b"event",u"event_id",b"event_id",u"ext_info",b"ext_info",u"ip",b"ip",u"ip_list",b"ip_list",u"memo",b"memo",u"mtime",b"mtime",u"notifiers",b"notifiers",u"operator",b"operator",u"package_id",b"package_id",u"package_name",b"package_name",u"parent_event",b"parent_event",u"parent_event_id",b"parent_event_id",u"status",b"status",u"subject",b"subject",u"system",b"system",u"target_category",b"target_category",u"target_id",b"target_id",u"target_name",b"target_name",u"topic",b"topic",u"trigger",b"trigger",u"version_id",b"version_id",u"version_name",b"version_name"]) -> None: ...
| [
"[email protected]"
] | |
010368ccaa1fe15b704c69b83731653d4d069dff | 9709da49cf74e0f1248848e029c5084af22e4e27 | /e2yun_addons/odoo12/wx_tools/models/res_partner.py | 56975b035d93ea387a4aa843381cea659c9a17e1 | [] | no_license | guwenfeng/filelib | 27ff44ecac988bbf1ed30e7e4d33bdfbc790393f | bc9003b47f06d7f10c0dc93034179d0fafccc683 | refs/heads/master | 2020-06-30T21:27:49.204212 | 2019-08-06T09:58:32 | 2019-08-06T09:58:32 | 200,955,551 | 0 | 1 | null | 2019-08-07T02:17:12 | 2019-08-07T02:17:12 | null | UTF-8 | Python | false | false | 3,851 | py | # coding=utf-8
import logging
from geopy.distance import vincenty
from odoo import models, fields, api
_logger = logging.getLogger(__name__)
class WXResPartner(models.Model):
_inherit = 'res.partner'
    wxcorp_user_id = fields.Many2one('wx.corpuser', 'Linked enterprise WeChat user')
    wx_user_id = fields.Many2one('wx.user', 'WeChat official-account user')
    wxlatitude = fields.Float('Latitude', digits=(10, 7))
    wxlongitude = fields.Float('Longitude', digits=(10, 7))
    wxprecision = fields.Float('Location accuracy', digits=(10, 7))
    location_write_date = fields.Datetime("Updated at", readonly=True)
    wx_address = fields.Char(u'Address', compute='_get_address')
    near_team = fields.Char(u'Nearby store', compute='_get_near_team')
@api.one
def _get_near_team(self):
_logger.info(self)
@api.one
def _get_address(self):
        # Get the user's location
from ..controllers import amapapi
if self.wxlatitude and self.wxlongitude:
wx_location = '%s,%s' % (self.wxlongitude, self.wxlatitude)
convert_location = amapapi.coordinateconvert(self, wx_location)
            location = convert_location.split(';')[0]  # the user's actual position
formatted_address = amapapi.geocoderegeo(self, location)
if formatted_address:
self.wx_address = formatted_address
newport_ri = (location.split(',')[1], location.split(',')[0])
crm_team_pool = self.env['crm.team'].search([])
search_read_new = []
for crm_team in crm_team_pool:
                if crm_team.latitude != 0.0 or crm_team.longitude != 0.0:
cleveland_oh = (crm_team.latitude, crm_team.longitude)
pos_kilometers = vincenty(newport_ri, cleveland_oh).kilometers
crm_team.distance = pos_kilometers
search_read_new.append(crm_team)
# _logger.info("门店与用户距离%s" % pos_kilometers)
if search_read_new:
min_distance = (min(search_read_new, key=lambda dict: dict['distance']))
self.near_team = '%s:距离%s公里' % (min_distance.street, min_distance.distance)
_logger.info("获取门店信息")
# def _compute_im_status(self):
# super(WXResPartner, self)._compute_im_status()
def send_corp_msg(self, msg):
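        # Send an enterprise WeChat (WeChat Work) message of the given mtype
        # to this partner's linked corp user.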
from ..rpc import corp_client
entry = corp_client.corpenv(self.env)
mtype = msg["mtype"]
if mtype == "text":
entry.client.message.send_text(entry.current_agent, self.wxcorp_user_id.userid, msg["content"])
if mtype == "card":
entry.client.message.send_text_card(entry.current_agent, self.wxcorp_user_id.userid, msg['title'],
msg['description'], msg['url'], btntxt=msg.get("btntxt", "详情"))
elif mtype == 'image':
ret = entry.client.media.upload(mtype, msg['media_data'])
entry.client.message.send_image(entry.current_agent, self.wxcorp_user_id.userid, ret['media_id'])
elif mtype == 'voice':
ret = entry.client.media.upload(mtype, msg['media_data'])
entry.client.message.send_voice(entry.current_agent, self.wxcorp_user_id.userid, ret['media_id'])
def get_corp_key(self):
if self.wxcorp_user_id:
return self.wxcorp_user_id.userid
def get_wx_key(self):
if self.wx_user_id:
return self.wx_user_id.openid
@api.multi
def write(self, vals):
resusers = super(WXResPartner, self).write(vals)
if vals.get('wx_user_id') and self.user_ids.wx_user_id.id != vals.get('wx_user_id'):
self.user_ids.wx_user_id = vals.get('wx_user_id')
self.user_ids.wx_id = self.user_ids.wx_user_id.openid
return resusers
| [
"[email protected]"
] | |
ac6d3308b07574f96e885441150f069ca1b51439 | 29db04da44804aae807f113dc0bb9134563b084e | /Python2-Core/src/main/prompto/statement/MethodCall.py | b483b2baa4a61ce28a56d38c5583e9fdd8129d0d | [] | no_license | prompto/prompto-python2 | 0f84e6e6f0f6889a4c866fc300d351fd7ee8a6b1 | f929df1dd55e0b5d8ec9f39398a924503fce4019 | refs/heads/master | 2022-12-07T12:41:27.297404 | 2022-11-27T17:37:43 | 2022-11-27T17:37:43 | 32,623,630 | 3 | 1 | null | 2019-05-04T11:01:41 | 2015-03-21T07:17:05 | Python | UTF-8 | Python | false | false | 8,090 | py | from prompto.declaration.IDeclaration import IDeclaration
from prompto.declaration.AbstractMethodDeclaration import AbstractMethodDeclaration
from prompto.declaration.ArrowDeclaration import ArrowDeclaration
from prompto.error.PromptoError import PromptoError
from prompto.error.SyntaxError import SyntaxError
from prompto.grammar.ArgumentList import ArgumentList
from prompto.statement.SimpleStatement import SimpleStatement
from prompto.declaration.ConcreteMethodDeclaration import ConcreteMethodDeclaration
from prompto.runtime.Context import MethodDeclarationMap
from prompto.runtime.MethodFinder import MethodFinder
from prompto.declaration.ClosureDeclaration import ClosureDeclaration
from prompto.type.MethodType import MethodType
from prompto.type.VoidType import VoidType
from prompto.value.ArrowValue import ArrowValue
from prompto.value.ClosureValue import ClosureValue
from prompto.value.BooleanValue import BooleanValue
from prompto.parser.Dialect import Dialect
from prompto.utils.CodeWriter import CodeWriter
class MethodCall(SimpleStatement):
def __init__(self, selector, arguments=None):
super(MethodCall, self).__init__()
self.selector = selector
self.arguments = arguments
def __str__(self):
suffix = str(self.arguments) if self.arguments is not None else ""
return str(self.selector) + suffix
def check(self, context):
finder = MethodFinder(context, self)
declaration = finder.findBest(False)
if declaration is None:
return VoidType.instance
if declaration.isAbstract():
self.checkAbstractOnly(context, declaration)
return VoidType.instance if declaration.returnType is None else declaration.returnType
else:
local = context if self.isLocalClosure(context) else self.selector.newLocalCheckContext(context, declaration)
return self.checkDeclaration(declaration, context, local)
def checkAbstractOnly(self, context, declaration):
if declaration.isReference: # parameter or variable populated from a method call
return
if declaration.memberOf is not None: # the category could be subclassed (if constructor called on abstract, that would raise an error anyway)
return
# if a global method, need to check for runtime dispatch
finder = MethodFinder(context, self)
potential = finder.findPotential()
if potential.all(lambda decl: decl.isAbstract()):
raise SyntaxError("Cannot call abstract method")
def checkReference(self, context):
finder = MethodFinder(context, self)
method = finder.findBest(False)
if method is not None:
return MethodType(method)
else:
return None
def isLocalClosure(self, context):
if self.selector.parent is not None:
return False
decl = context.getLocalDeclaration(IDeclaration, self.selector.name)
return isinstance(decl, MethodDeclarationMap)
def checkDeclaration(self, declaration, parent, local):
if isinstance(declaration, ConcreteMethodDeclaration) and declaration.mustBeBeCheckedInCallContext(parent):
return self.fullCheck(declaration, parent, local)
else:
return self.lightCheck(declaration, local)
def lightCheck(self, declaration, local):
declaration.registerParameters(local)
return declaration.check(local)
def fullCheck(self, declaration, parent, local):
try:
arguments = self.makeArguments(parent, declaration)
declaration.registerParameters(local)
for argument in arguments:
expression = argument.resolve(local, declaration, True)
value = argument.getParameter().checkValue(parent, expression)
local.setValue(argument.getName(), value)
return declaration.check(local)
except PromptoError as e:
raise SyntaxError(e.message)
def makeArguments(self, context, declaration):
if self.arguments is None:
return ArgumentList()
else:
return self.arguments.makeArguments(context, declaration)
def interpret(self, context):
finder = MethodFinder(context, self)
declaration = finder.findBest(True)
if declaration is None:
raise SyntaxError("No such method: " + str(self))
local = self.selector.newLocalContext(context, declaration)
declaration.registerParameters(local)
self.assignArguments(context, local, declaration)
return declaration.interpret(local)
def assignArguments(self, context, local, declaration):
arguments = self.makeArguments(context, declaration)
for argument in arguments:
expression = argument.resolve(local, declaration, True)
parameter = argument.getParameter()
value = parameter.checkValue(context, expression)
if value is not None and parameter.mutable and not value.mutable:
from prompto.error.NotMutableError import NotMutableError
raise NotMutableError()
local.setValue(argument.getName(), value)
def interpretReference(self, context):
declaration = self.findDeclaration(context)
return ClosureValue(context, MethodType(declaration))
def interpretAssert(self, context, testMethodDeclaration):
value = self.interpret(context)
if isinstance(value, BooleanValue):
return value.value
else:
writer = CodeWriter(self.dialect, context)
self.toDialect(writer)
raise SyntaxError("Cannot test '" + str(writer) + "'")
def findDeclaration(self, context):
method = self.findRegistered(context)
if method is not None:
return method
else:
finder = MethodFinder(context, self)
return finder.findBest(True)
def findRegistered(self, context):
if self.selector.getParent() is None:
try:
o = context.getValue(self.selector.getName())
if isinstance(o, ClosureValue):
return self.getClosureDeclaration(context, o)
elif isinstance(o, ArrowValue):
return ArrowDeclaration(o)
except PromptoError:
pass
return None
def getClosureDeclaration(self, context, closure):
decl = closure.itype.method
if decl.memberOf is not None:
# the closure references a member method (useful when a method reference is needed)
# in which case we may simply want to return that method to avoid spilling context into method body
# this is only true if the closure comes straight from the method's instance context
# if the closure comes from an accessible context that is not the instance context
# then it is a local variable that needs the closure context to be interpreted
declaring = context.contextForValue(self.selector.getName())
if declaring == closure.context:
return decl
return ClosureDeclaration(closure)
def toDialect(self, writer):
if self.requiresInvoke(writer):
writer.append("invoke: ")
self.selector.toDialect(writer, False)
if self.arguments is not None:
self.arguments.toDialect(writer)
elif writer.dialect is not Dialect.E:
writer.append("()")
def requiresInvoke(self, writer):
if writer.dialect is not Dialect.E or (self.arguments is not None and len(self.arguments) > 0):
return False
try:
finder = MethodFinder(writer.context, self)
declaration = finder.findBest(False)
# if method is a reference, need to prefix with invoke
return declaration.isAbstract() or declaration.closureOf is not None
except:
pass
# ok
return False | [
"[email protected]"
] | |
3bfb51f0aa7f3004baf108569efcc7120a220413 | d60ee49abaee6c74c5b777f8f112a7f75f71f029 | /genome/variants2/filter/VCF/somatic/genes/indels/rare_variants.py | 6062b31fd198971fea91fd9fa2620ed59197a345 | [] | no_license | ak352/melanomics | 41530f623b4bfdbd5c7b952debcb47622d1a8e88 | fc5e6fdb1499616fb25a8dc05259add8a65aeca0 | refs/heads/master | 2020-12-24T16:14:42.271416 | 2015-08-06T12:48:52 | 2015-08-06T12:48:52 | 18,439,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,593 | py | import sys
from common_rsid import get_non_flagged
#Creates a dictionary of fields pointing to column numbers, makes the code more readable
def ParseFields(line):
fields = {}
var = line[:-1].split("\t")
for x in range(0, len(var)):
fields[var[x]] = x
return fields
def read_tsv(infile):
with open(infile) as f:
var = ParseFields(next(f))
for line in f:
record = {}
line = line[:-1].split("\t")
for x in var:
record[x] = line[var[x]]
record["all"] = "\t".join(line)
yield record
def report(line, log):
for s in sys.stderr, log:
s.write(line)
if __name__ == "__main__":
infile = sys.argv[1]
outfile = sys.argv[2]
logfile = outfile + ".log"
percent_threshold = 0.01
out, log = [open(x, "w") for x in [outfile, logfile]]
annotations = [line[:-1] for line in open("frequency_annotations")]
non_flagged = get_non_flagged()
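    # get_non_flagged presumably returns the set of dbSNP rsIDs without
    # clinical flags; variants carrying these IDs are treated as common.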
report("Input: %s\n" % infile, log)
report("Output: %s\n" % outfile, log)
report("Log file: %s\n" % logfile, log)
report("Population frequency threshold = %1.2f\n" % percent_threshold, log)
rare, num_common, total = 0,0,0
""" Output the header """
out.write(next(open(infile)))
count = {}
for anno in annotations:
count[anno] = 0
count["dbsnp"] = 0
count["avsnp"] = 0
""" Filter the variants """
for record in read_tsv(infile):
is_common = False
for annotation in annotations:
if record[annotation]:
if float(record[annotation]) >= percent_threshold:
is_common = True
count[annotation] += 1
if record['snp138'] in non_flagged:
is_common=True
count['dbsnp'] += 1
if record['avsnp138'] in non_flagged:
is_common=True
count['avsnp'] += 1
total += 1
if is_common:
num_common += 1
else:
rare += 1
out.write(record["all"] + "\n")
report("STATISTICS:\n", log)
report("Total variants = %d\n" % total, log)
report("Common variants = %d (%2.2f%%)\n" % (num_common, float(num_common)*100/float(total)), log)
report("Of which:\n", log)
for annotation in annotations+['dbsnp', 'avsnp']:
report("\tVariants found in %s = %d (%2.2f%%)\n" % (annotation, count[annotation], float(count[annotation])*100/float(total)), log)
report("Rare variants = %d\n" % rare, log)
| [
"[email protected]"
] | |
b1aa5b9263b56dd44182ac4f6d3ae9f712c62267 | 5b2590bc39cf0cb44b7257f2be3be72a0e58d068 | /torchkbnufft/nufft/sparse_interp_mat.py | 4f2ef88a46f98241db14e644a685efbb30416c0f | [
"MIT"
] | permissive | zaccharieramzi/torchkbnufft | efc59f4bed6675173b8064d5dec7ec9eeb43b0f2 | 37e5808ab73ddb52cbd4655f3d7fd6273b3dd89a | refs/heads/master | 2020-12-26T08:17:44.180049 | 2020-08-03T17:17:40 | 2020-08-03T17:17:40 | 273,199,030 | 0 | 0 | MIT | 2020-06-18T09:39:00 | 2020-06-18T09:39:00 | null | UTF-8 | Python | false | false | 5,225 | py | import itertools
import numpy as np
import torch
from ..math import complex_mult, conj_complex_mult
from .interp_functions import calc_coef_and_indices
def get_interpob(model):
"""Retrieves the interpolation dictionary from model.
Different nufft objects use different interpolation objects. This function
only extracts the minimum amount necessary for sparse matrix
precomputation.
Args:
model (TorchKbNufft object): A TorchKbNufft object with attributes for
forming a KbNufft interpolation dictionary.
Returns:
dict: A dictionary with interpolation parameters.
"""
interpob = dict()
interpob['table'] = []
for i in range(len(model.table)):
interpob['table'].append(getattr(model, 'table_tensor_' + str(i)))
interpob['grid_size'] = model.grid_size_tensor
interpob['numpoints'] = model.numpoints_tensor
interpob['table_oversamp'] = model.table_oversamp_tensor
return interpob
def compute_forw_mat(dims, table, numpoints, Jlist, L, tm):
"""Compute a forward Kaiser-Bessel interpolation sparse matrix.
Args:
dims (tensor): A list of sizes of each dimension.
table (tensor): A list of interpolation tables.
numpoints (tensor): A list of numbers of nearest neighbors for each
dimension.
Jlist (tensor): A list of nearest neighbor configurations.
L (tensor): A list of table sizes for each dimension.
tm (tensor): An array of normalized frequency locations.
Returns:
tuple: A 2-length tuple with a sparse interpolation matrix in each
element. The first matrix has the real coefficients; the second
has the imaginary.
"""
dtype = table[0].dtype
device = table[0].device
int_type = torch.long
nJ = Jlist.shape[1]
# center of tables
centers = torch.floor(numpoints * L / 2).to(dtype=int_type)
# offset from k-space to first coef loc
kofflist = 1 + torch.floor(tm - numpoints.unsqueeze(1) / 2.0)
# do a bit of type management - ints for faster index comps
dims = dims.to(dtype=int_type)
kofflist = kofflist.to(dtype=int_type)
Jlist = Jlist.to(dtype=int_type)
# initialize the sparse matrices
coef_mat_real = torch.sparse.FloatTensor(
tm.shape[-1], torch.prod(dims)).to(dtype=dtype, device=device)
coef_mat_imag = torch.sparse.FloatTensor(
tm.shape[-1], torch.prod(dims)).to(dtype=dtype, device=device)
# loop over offsets and take advantage of broadcasting
for Jind in range(nJ):
coef, arr_ind = calc_coef_and_indices(
tm, kofflist, Jlist[:, Jind], table, centers, L, dims)
sparse_coords = torch.stack(
(
torch.arange(
arr_ind.shape[0],
dtype=arr_ind.dtype,
device=arr_ind.device
),
arr_ind
)
)
coef_mat_real = coef_mat_real + torch.sparse.FloatTensor(
sparse_coords,
coef[0],
torch.Size((arr_ind.shape[0], torch.prod(dims)))
)
coef_mat_imag = coef_mat_imag + torch.sparse.FloatTensor(
sparse_coords,
coef[1],
torch.Size((arr_ind.shape[0], torch.prod(dims)))
)
return coef_mat_real, coef_mat_imag
def precomp_sparse_mats(om, model):
"""Precompute sparse interpolation matrices.
Args:
om (tensor): The k-space trajectory in radians/voxel.
model (TorchKbNufft object): A KbNufft type object with attributes for
creating a KbNufft interpolation object.
Returns:
tuple: A 2-length tuple with lists of sparse interpolation matrices in
each element. The first matrix has the real coefficient matrices;
the second has the imaginary.
"""
interpob = get_interpob(model)
dtype = interpob['table'][0].dtype
device = interpob['table'][0].device
# extract interpolation params and match device and dtype to input
table = interpob['table']
grid_size = interpob['grid_size']
numpoints = interpob['numpoints']
table_oversamp = interpob['table_oversamp']
ndims = om.shape[1]
M = om.shape[2]
# convert to normalized freq locs
tm = torch.zeros(size=om.shape, dtype=dtype, device=device)
Jgen = []
for i in range(ndims):
gam = (2 * np.pi / grid_size[i])
tm[:, i, :] = om[:, i, :] / gam
Jgen.append(range(np.array(numpoints[i].cpu(), dtype=np.int)))
# build an iterator for going over all J values
Jgen = list(itertools.product(*Jgen))
coef_real_mats = []
coef_imag_mats = []
for norm_traj in tm:
coef_mat_real, coef_mat_imag = compute_forw_mat(
grid_size.to(dtype=dtype, device=device),
table,
numpoints,
torch.tensor(
np.transpose(np.array(Jgen)),
dtype=dtype,
device=device
),
table_oversamp,
norm_traj
)
coef_real_mats.append(coef_mat_real)
coef_imag_mats.append(coef_mat_imag)
return coef_real_mats, coef_imag_mats
| [
"[email protected]"
] | |
445094ac9443d4043d0a46684d8716d700979415 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/pyinstaller/tests/old_suite/interactive/test_pygame.py | e3c84c9dfc7af1c578e2a92a953046ea790ce1d4 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:3026e58b95b7ac74eb29a2e0e663c0a7e96472b2bf98d1be45c60efdd0b9458b
size 922
| [
"[email protected]"
] | |
50fa5a76956a5e9f87fc8b39fcc81f05fde6feeb | 365967082720f3fda31afccfc237b7a67e8ffc07 | /math/UglyNumber.py | 80cd2786b22e2532548ebd0cad9f942395e607c6 | [] | no_license | hulaba/geekInsideYou | ec68dee3fa24d63f5470aa40b600ef34d37c5da1 | 72c1f1b4fbf115db91c908a68c9ac3ca4cb22a4f | refs/heads/master | 2022-12-11T11:11:03.149336 | 2020-09-12T16:12:40 | 2020-09-12T16:12:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | def divide(no, div):
while no % div == 0:
        no = no // div
return no
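# An ugly number has no prime factors other than 2, 3 and 5.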
def isUgly(num):
num = divide(num, 2)
num = divide(num, 3)
num = divide(num, 5)
return 1 if num == 1 else 0
def nthUgly(n):
count = 1
i = 1
while n > count:
i += 1
if isUgly(i):
count += 1
return i
def main():
print(nthUgly(15))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
552b509615b8838ca1c4f36eb4f2afc80db01eb6 | 377d86194fd6d23c8ef3df3e6f7d90092dd8f9b4 | /workout_tracker/auth/urls.py | 81a75b085386e9757309abff228e83dd0d38bc59 | [
"MIT"
] | permissive | e-dang/Workout-Tracker | f20f44b012e895244bad413a46103415ffae5732 | 00a27597ea628cff62b320d616f56b2df4f344a0 | refs/heads/master | 2022-12-28T07:49:34.179307 | 2020-10-12T20:48:28 | 2020-10-12T20:48:28 | 293,937,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from django.urls import path, include
from rest_auth.views import PasswordResetConfirmView
urlpatterns = [
path('', include('rest_auth.urls')),
path('register/', include('rest_auth.registration.urls')),
path('password-reset-confirm/<uidb64>/<token>/', PasswordResetConfirmView.as_view(), name='password_reset_confirm')
]
| [
"[email protected]"
] | |
82c411312abd5617b27f1cfbea9d327ce60f4dc8 | 51b6d2fc53d5c632fcf01319842baebf13901e84 | /atcoder.jp/abc196/abc196_d/Main.py | 08662bac5d300ada3cd847e151140d34667825e1 | [] | no_license | mono-0812/procon | 35db3b2c21eff74fbd7b52db07f249380f6834ef | 68a4b53880a228a0164052b23d1326363efcbc20 | refs/heads/master | 2023-05-30T17:02:58.935074 | 2021-06-27T12:15:10 | 2021-06-27T12:15:10 | 345,896,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | import bisect,collections,copy,heapq,itertools,math,string,sys,queue
def I(): return input()
def IS(): return input().split()
def II(): return int(input())
def IIS(): return map(int,input().split())
def LIIS(): return list(map(int,input().split()))
def ZER(N): return [False for _ in range(N)]
INF=float("inf")
MOD=10**9+7
def make_divisors(n):
lower_divisors , upper_divisors = [], []
i = 1
while i*i <= n:
if n % i == 0:
lower_divisors.append(i)
if i != n // i:
upper_divisors.append(n//i)
i += 1
return lower_divisors + upper_divisors[::-1]
##############################################################################
h,w,a,b=IIS()
used=[[0]*w for _ in range(h)]
res=0
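# ABC196 D: count the ways to place `a` 1x2 dominoes and `b` 1x1 tiles on an
# h x w grid, scanning cells left-to-right, top-to-bottom.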
def dfs(i,j,a,b):
global res
if a<0 or b<0:
return
if j==w:
j=0
i+=1
if i==h:
res+=1
return
if used[i][j]==1:
return dfs(i,j+1,a,b)
used[i][j]=1
dfs(i,j+1,a,b-1)
if j+1 < w and used[i][j+1]==0:
used[i][j+1]=1
dfs(i,j+1,a-1,b)
used[i][j+1]=0
if i+1<h and used[i+1][j]==0:
used[i+1][j]=1
dfs(i,j+1,a-1,b)
used[i+1][j]=0
used[i][j]=0
return res
print(dfs(0,0,a,b))
| [
"[email protected]"
] | |
263f48233234deeabc43b9fed764e143d380396d | dd63082fe701392a132af4c52fba3ce2fb76e84c | /virtual/bin/django-admin.py | 52ff4c4bd001d26a8f0e187c372daeff74962e1b | [] | no_license | CollinsMuiruri/multivendor | b53bfbe773e86d1793d09d87e4b8ecada79a7823 | cf68cb3e9dbca8040dc781e173712f8e043cb42f | refs/heads/main | 2023-04-11T07:54:04.488449 | 2021-04-19T23:05:40 | 2021-04-19T23:05:40 | 359,611,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | #!/home/collins/Documents/proxy-services/multivendor/virtual/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"[email protected]"
] | |
ea5160d5e285bd0f2ad8621191591fd191836803 | ebf934fb6fd4e0ebbd870db857897fbb9d8022b7 | /pyspark/ml/other/spark_ml_02.py | 4e0ecd29cb3ae2cc49e494d27fa5864a6c628830 | [] | no_license | AidenLong/ai | 6ce2bcf5928f8350ba8b440e9032ea4c39dd69ec | 0901e6010bbb51a165680e52d9adaeec7e510dc1 | refs/heads/master | 2020-05-03T13:27:38.698490 | 2019-08-22T03:18:09 | 2019-08-22T03:18:09 | 178,653,209 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | # --encoding:utf-8 --
import os
from pyspark.mllib.stat import Statistics
from pyspark import SparkConf, SparkContext
if 'SPARK_HOME' not in os.environ:
os.environ['SPARK_HOME'] = 'D:\syl\dev\spark-1.6.1-bin-2.5.0-cdh5.3.6'
# Create the Spark context
conf = SparkConf() \
.setMaster('local') \
.setAppName('spark ml 02')
sc = SparkContext(conf=conf)
# Build an RDD of vectors
vector_rdd = sc.parallelize([
[0, 2, 3],
[4, 8, 16],
[-7, 8, -9],
[10, -10, 12]
])
# 1. Summary statistics
summary = Statistics.colStats(vector_rdd)
print("Summary object type: {}".format(type(summary)))
print("Mean of each feature: {}".format(summary.mean()))
print("Variance of each feature: {}".format(summary.variance()))
print("Number of samples: {}".format(summary.count()))
print("Maximum of each feature: {}".format(summary.max()))
print("Minimum of each feature: {}".format(summary.min()))
print("Count of non-zero values per feature: {}".format(summary.numNonzeros()))
print("L1 norm of each feature: {}".format(summary.normL1()))
print("L2 norm of each feature: {}".format(summary.normL2()))
# 2. Correlation statistics (feature-to-feature correlation)
x = sc.parallelize([1.0, 1.5, 0.9, 0, 0.85, 0.95, 0.5])
y = sc.parallelize([2.0, 2.1, 0, 2.0, 0, 2.21, 0])
print("Correlation between x and y: {}".format(Statistics.corr(x, y)))
# `method` selects the correlation measure; the default is 'pearson'
# (Pearson correlation), with 'spearman' (Spearman rank correlation) also available
feature_corr = Statistics.corr(vector_rdd, method='pearson')
print("Feature-to-feature correlation matrix of the RDD:\n{}".format(feature_corr))
| [
"[email protected]"
] | |
d9ad09a9d01aacf6b03f7ada5b2712a773f66e3c | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /11_Time_Series_Forecasting_with_Python/02/features_lag1.py | ec593a95a20499aae3844259bf5a6e6eb9850287 | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | # create a lag feature
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
series = read_csv('daily-minimum-temperatures.csv', header=0, index_col=0, parse_dates=True, squeeze=True)
temps = DataFrame(series.values)
dataframe = concat([temps.shift(1), temps], axis=1)
dataframe.columns = ['t', 't+1']
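# Column 't' holds the previous observation (lag 1), so its first row is NaN.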
print(dataframe.head(5))
| [
"[email protected]"
] | |
a6f64d216a61ec6b1ffd4aea86d1986532e794ab | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /autosar/models/data_transformation_subtypes_enum.py | a50f8760e3b21482a2c78717041e0d8b56a05c4d | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 166 | py | from enum import Enum
__NAMESPACE__ = "http://autosar.org/schema/r4.0"
class DataTransformationSubtypesEnum(Enum):
DATA_TRANSFORMATION = "DATA-TRANSFORMATION"
| [
"[email protected]"
] | |
8e6929fa89c74d13a0c75c8d23b87b2a2effc313 | b7125b27e564d2cc80a2ce8d0a6f934aa22c8445 | /.history/display_board_20201107211818.py | 025bebad29b34bbd089707fa7af0e9cfb8956bf5 | [] | no_license | JensVL96/Puzzle-solver-for-fun | 4c15dcd570c3705b7ac555efb56b52913e81083c | 6d8a4378a480372213a596a336a4deca727a00fc | refs/heads/master | 2021-07-15T05:19:42.185495 | 2020-11-08T13:59:49 | 2020-11-08T13:59:49 | 224,855,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,807 | py | from config import *
import pygame as pg
class Display_board():
def __init__(self, screen):
self.screen = screen
self.font_num = pg.font.SysFont("comicsans", NUMBER_SIZE)
self.font_cdt = pg.font.SysFont("comicsans", CANDIDATE_SIZE)
def draw_val(self, val, x, y):
text1 = self.font_num.render(str(val), 1, BLACK)
self.screen.blit(text1, (x * BLOCK_SIZE + 15, y * BLOCK_SIZE + 15))
def draw_cdt(self, val, x, y):
text1 = self.font_cdt.render(str(val), 1, BLACK)
self.screen.blit(text1, (x * BLOCK_SIZE + 1, y * BLOCK_SIZE + 1))
def on_mouse_press(self, x, y, symbol, modifier):
pass
def draw(self, grid, cell):
for i in range (9):
for j in range (9):
if grid[i][j] != 0:
if type(grid[i][j]) != int:
                        self.draw_candidates(grid, cell, i, j)
else:
text1 = self.font_num.render(str(grid[i][j]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + i * BLOCK_SIZE + 15, TOP_LY + j * BLOCK_SIZE + 14))
size = 0
x_coord = TOP_LX
y_coord = TOP_LY
for i in range(10):
if i % 3 == 0:
print("thick")
thick = 7
else:
print("thin")
thick = 1
if (i + 2) % 3 == 0:
print("increasing size: ", i)
size += 7
# else:
# size = 0
check_diff = x_coord
x_coord += BLOCK_SIZE + size
# thick Thick thin thin thick thin thin thick thin thin thick
# TOP_LX -> TOP_RX & ( TOP_LY -> BS + 7 -> BS -> BS -> BS + 7 -> BS -> BS -> BS + 7 -> BS -> BS)
print("line: ", i, size, "block size: ", BLOCK_SIZE)
print("Start horizontal: ", check_diff, "end: ", x_coord)
print("Start vertical: ", i * BLOCK_SIZE + size, "end: ", 405 + i * BLOCK_SIZE + size)
pg.draw.line(self.screen, BLACK, (TOP_LX,
TOP_LY + i * BLOCK_SIZE + size),
(TOP_RX,
TOP_RY + i * BLOCK_SIZE+ size), thick)
pg.draw.line(self.screen, BLACK, (TOP_LX + i * BLOCK_SIZE+ size,
TOP_LY),
(BOT_LX + i * BLOCK_SIZE+ size,
BOT_LY), thick)
# For candidate placement
# if i % 3 == 0:
# print(BLOCK_SIZE)
# pg.draw.line(self.screen, BLACK, (cell[0],
# cell[1] + i * (cell[2] / 9)),
# ((cell[0] + cell[2]),
# cell[1] + i * (cell[2] / 9)), 1)
# pg.draw.line(self.screen, BLACK, (cell[0] + i * (cell[3] / 9),
# cell[1]),
# (cell[0] + i * (cell[3] / 9),
# cell[1] + cell[3]), 1)
    def draw_candidates(self, grid, cell, i, j):
new_line = 1
iteration = 1
indent = 15
for number in grid[i][j]:
if iteration % 3 == 1: # Checking if first in line: 1, 4, 7
text1 = self.font_cdt.render(str(number), 1, BLACK)
self.screen.blit(text1, (cell[0] + 3, cell[1] + ((new_line - 1) * indent) + 2))
else:
text1 = self.font_cdt.render(str(number), 1, BLACK)
self.screen.blit(text1, (cell[0] + ((iteration - 1) * indent) + 3, cell[1] + ((new_line - 1) * indent) + 2))
if iteration % 3 == 0: # checking if last in line: 3, 6
new_line += 1
iteration = 0
iteration += 1
def update(self, grid, row, col, blk):
font_val = pg.font.SysFont("comicsans", BOLD)
if row != (-1, -1):
# Remove old number
text1 = self.font_num.render(str(grid[row[0]][row[1]]), 1, WHITE)
self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 15, TOP_LY + row[1] * BLOCK_SIZE + 15))
# Rewrite in bigger font
text1 = font_val.render(str(grid[row[0]][row[1]]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 14, TOP_LY + row[1] * BLOCK_SIZE + 10))
if col != (-1, -1):
# Remove old number
text1 = self.font_num.render(str(grid[col[0]][col[1]]), 1, WHITE)
self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 15, TOP_LY + col[1] * BLOCK_SIZE + 15))
# Rewrite in bigger font
text1 = font_val.render(str(grid[col[0]][col[1]]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 14, TOP_LY + col[1] * BLOCK_SIZE + 10))
if blk != (-1, -1):
# Remove old number
text1 = self.font_num.render(str(grid[blk[0]][blk[1]]), 1, WHITE)
self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 15, TOP_LY + blk[1] * BLOCK_SIZE + 15))
# Rewrite in bigger font
text1 = font_val.render(str(grid[blk[0]][blk[1]]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 14, TOP_LY + blk[1] * BLOCK_SIZE + 10))
def find_cell(self, x, y):
# Only applies glow when a cell is selected
if x == -1 and y == -1:
return
width = BLOCK_SIZE
height = BLOCK_SIZE
# Adjustment in size if bordering a thick line
if x % 3 == 0: # If thick line on the left
start_pos_x = TOP_LX + x * BLOCK_SIZE + 4
width = BLOCK_SIZE# - 4
else:
start_pos_x = TOP_LX + x * BLOCK_SIZE + 1
if (x + 1) % 3 == 0: # If thick line on the right
width = BLOCK_SIZE# - 3.5
if y % 3 == 0: # If thick line on the top
start_pos_y = TOP_LY + y * BLOCK_SIZE + 4
height = BLOCK_SIZE# - 4
else:
start_pos_y = TOP_LY + y * BLOCK_SIZE + 1
if (y + 1) % 3 == 0: # If thick line on the bottom
height = BLOCK_SIZE# - 3.5
return (start_pos_x, start_pos_y, width, height)
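    # Oscillate the highlight alpha between 30 and 175 so the selected cell
    # appears to blink.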
def blink(self, alpha, a_change):
if a_change:
alpha += BLINK_SPEED
if alpha >= 175:
a_change = False
elif a_change == False:
alpha += -BLINK_SPEED
if alpha <= 30:
a_change = True
return (alpha, a_change)
| [
"[email protected]"
] | |
8be82ede930331667dd607f674a2cefd5c1b9b0c | dbd8180d9c02c22b42baa5227437714ff352fd8e | /1-100/L237.py | 2ab3f5f5b9eb4f3c53c14f04d1fca60d0a23b65e | [] | no_license | k8godzilla/-Leetcode | 92953dfffc0f06907fa7bd0beea7bc27b16f9efa | 58d5384155f481b1d1b0a7ca69566245dd779554 | refs/heads/master | 2020-06-12T15:35:43.380979 | 2019-08-07T11:14:49 | 2019-08-07T11:14:49 | 194,348,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 26 11:27:50 2019
@author: admin
"""
'''
Write a function to delete a given (non-tail) node from a linked list. You
will only be given access to the node to be deleted.

Given a linked list head = [4,5,1,9], it can be represented as:

Example 1:
Input: head = [4,5,1,9], node = 5
Output: [4,1,9]
Explanation: Given the second node with value 5, the linked list should
become 4 -> 1 -> 9 after calling your function.

Example 2:
Input: head = [4,5,1,9], node = 1
Output: [4,5,9]
Explanation: Given the third node with value 1, the linked list should
become 4 -> 5 -> 9 after calling your function.

Source: LeetCode
Link: https://leetcode-cn.com/problems/delete-node-in-a-linked-list
Copyright belongs to LeetCode. For commercial reprints contact them for
authorization; for non-commercial reprints please cite the source.
'''
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
while node.next is not None:
node.val = node.next.val
if node.next.next is None:
node.next = None
break
node = node.next
| [
"[email protected]"
] | |
7e33ad8bbc4f98c052b5f081b07505b5b17aa3df | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /9_排序和搜索/经典题/826. 安排工作以达到最大收益.py | ec2e9799c526bbb8a42b79b5b4cd4c785cc7eb7f | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | from typing import List
from bisect import bisect_left
# difficulty[i] 表示第 i 个工作的难度,profit[i] 表示第 i 个工作的收益。
# worker[i] 是第 i 个工人的能力,即该工人只能完成难度小于等于 worker[i] 的工作。
# !每一个工人都最多只能安排一个工作,但是一个工作可以完成多次。
# 我们能得到的最大收益是多少?(调整打怪策略,与什么样的怪兽战斗获得金币最多)
# 1 <= n, m <= 104
# 不能二分查找:题目没有说难度高的工作收益也越大
# 总结:
# !排序+遍历加指针记录
# !对每个工人找到最大收益
class Solution:
def maxProfitAssignment(
self, difficulty: List[int], profit: List[int], worker: List[int]
) -> int:
worker.sort()
jobs = sorted(zip(difficulty, profit), key=lambda x: x[0])
res, ji, maxPro = 0, 0, 0
for w in worker:
while ji < len(jobs) and jobs[ji][0] <= w:
maxPro = max(maxPro, jobs[ji][1])
ji += 1
res += maxPro
return res
print(
Solution().maxProfitAssignment(
difficulty=[2, 4, 6, 8, 10], profit=[10, 20, 30, 40, 50], worker=[4, 5, 6, 7]
)
)
# 输出: 100
# 解释: 工人被分配的工作难度是 [4,4,6,6] ,分别获得 [20,20,30,30] 的收益
| [
"[email protected]"
] | |
6476471730816d1b49b701f2b5d54bc73d71ef9f | 83df1fb88f7abba1198284bb4b8dc8d0a7ff6f93 | /src/third_party/catapult/systrace/systrace/systrace.py | 2a5aa38573139a8b08a9ab0658343a790f717e9d | [
"BSD-3-Clause"
] | permissive | JamshedVesuna/telemetry | 7f3385399e47b7b98f8d3eec80ade43690956cd7 | 1697886b155f22a42e13aa311538f1db65e6e6ed | refs/heads/master | 2021-01-20T09:12:34.645395 | 2016-01-22T08:40:44 | 2016-01-22T08:40:44 | 47,851,831 | 3 | 2 | null | 2020-07-24T04:58:51 | 2015-12-11T21:25:59 | HTML | UTF-8 | Python | false | false | 8,287 | py | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Android system-wide tracing utility.
This is a tool for capturing a trace that includes data from both userland and
the kernel. It creates an HTML file for visualizing the trace.
"""
import sys
# Make sure we're using a new enough version of Python.
# The flags= parameter of re.sub() is new in Python 2.7. And Systrace does not
# support Python 3 yet.
version = sys.version_info[:2]
if version != (2, 7):
sys.stderr.write('This script does not support Python %d.%d. '
'Please use Python 2.7.\n' % version)
sys.exit(1)
import imp
import optparse
import os
import util
# The default agent directory.
DEFAULT_AGENT_DIR = 'agents'
def parse_options(argv):
"""Parses and checks the command-line options.
Returns:
A tuple containing the options structure and a list of categories to
be traced.
"""
usage = 'Usage: %prog [options] [category1 [category2 ...]]'
desc = 'Example: %prog -b 32768 -t 15 gfx input view sched freq'
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option('-o', dest='output_file', help='write HTML to FILE',
default='trace.html', metavar='FILE')
parser.add_option('-t', '--time', dest='trace_time', type='int',
help='trace for N seconds', metavar='N')
parser.add_option('-b', '--buf-size', dest='trace_buf_size', type='int',
help='use a trace buffer size of N KB', metavar='N')
parser.add_option('-k', '--ktrace', dest='kfuncs', action='store',
help='specify a comma-separated list of kernel functions '
'to trace')
parser.add_option('-l', '--list-categories', dest='list_categories',
default=False, action='store_true',
help='list the available categories and exit')
parser.add_option('-a', '--app', dest='app_name', default=None, type='string',
action='store',
help='enable application-level tracing for comma-separated '
'list of app cmdlines')
parser.add_option('--no-fix-threads', dest='fix_threads', default=True,
action='store_false',
help='don\'t fix missing or truncated thread names')
parser.add_option('--no-fix-tgids', dest='fix_tgids', default=True,
action='store_false',
help='Do not run extra commands to restore missing thread '
'to thread group id mappings.')
parser.add_option('--no-fix-circular', dest='fix_circular', default=True,
action='store_false',
help='don\'t fix truncated circular traces')
parser.add_option('--no-compress', dest='compress_trace_data',
default=True, action='store_false',
help='Tell the device not to send the trace data in '
'compressed form.')
parser.add_option('--link-assets', dest='link_assets', default=False,
action='store_true',
help='(deprecated)')
parser.add_option('--boot', dest='boot', default=False, action='store_true',
help='reboot the device with tracing during boot enabled. '
'The report is created by hitting Ctrl+C after the device '
'has booted up.')
parser.add_option('--from-file', dest='from_file', action='store',
help='read the trace from a file (compressed) rather than '
'running a live trace')
parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
type='string', help='(deprecated)')
parser.add_option('-e', '--serial', dest='device_serial', type='string',
help='adb device serial number')
parser.add_option('--agent-dirs', dest='agent_dirs', type='string',
help='the directories of additional systrace agent modules.'
' The directories should be comma separated, e.g., '
'--agent-dirs=dir1,dir2,dir3. Directory |%s| is the default'
' agent directory and will always be checked.'
% DEFAULT_AGENT_DIR)
parser.add_option('--target', dest='target', default='android', type='string',
help='chose tracing target (android or linux)')
options, categories = parser.parse_args(argv[1:])
if options.link_assets or options.asset_dir != 'trace-viewer':
parser.error('--link-assets and --asset-dir are deprecated.')
if (options.trace_time is not None) and (options.trace_time <= 0):
parser.error('the trace time must be a positive number')
if (options.trace_buf_size is not None) and (options.trace_buf_size <= 0):
parser.error('the trace buffer size must be a positive number')
return (options, categories)
def write_trace_html(html_filename, script_dir, agents):
"""Writes out a trace html file.
Args:
html_filename: The name of the file to write.
script_dir: The directory containing this script.
agents: The systrace agents.
"""
systrace_dir = os.path.abspath(os.path.dirname(__file__))
html_prefix = read_asset(systrace_dir, 'prefix.html')
html_suffix = read_asset(systrace_dir, 'suffix.html')
trace_viewer_html = read_asset(script_dir, 'systrace_trace_viewer.html')
# Open the file in binary mode to prevent python from changing the
# line endings.
html_file = open(html_filename, 'wb')
html_file.write(html_prefix.replace('{{SYSTRACE_TRACE_VIEWER_HTML}}',
trace_viewer_html))
html_file.write('<!-- BEGIN TRACE -->\n')
for a in agents:
html_file.write(' <script class="')
html_file.write(a.get_class_name())
html_file.write('" type="application/text">\n')
html_file.write(a.get_trace_data())
html_file.write(' </script>\n')
html_file.write('<!-- END TRACE -->\n')
html_file.write(html_suffix)
html_file.close()
print('\n wrote file://%s\n' % os.path.abspath(html_filename))
def create_agents(options, categories):
"""Create systrace agents.
This function will search systrace agent modules in agent directories and
create the corresponding systrace agents.
Args:
options: The command-line options.
categories: The trace categories to capture.
Returns:
The list of systrace agents.
"""
agent_dirs = [os.path.join(os.path.dirname(__file__), DEFAULT_AGENT_DIR)]
if options.agent_dirs:
agent_dirs.extend(options.agent_dirs.split(','))
agents = []
for agent_dir in agent_dirs:
if not agent_dir:
continue
for filename in os.listdir(agent_dir):
(module_name, ext) = os.path.splitext(filename)
if (ext != '.py' or module_name == '__init__'
or module_name.endswith('_unittest')):
continue
(f, pathname, data) = imp.find_module(module_name, [agent_dir])
try:
module = imp.load_module(module_name, f, pathname, data)
finally:
if f:
f.close()
if module:
agent = module.try_create_agent(options, categories)
if not agent:
continue
agents.append(agent)
return agents
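# Illustrative sketch (not part of the original script): the smallest agent
# module create_agents() could load. Everything below is inferred from how
# this script uses agents -- try_create_agent(), start(), collect_result(),
# expect_trace(), get_class_name() and get_trace_data() -- and the class
# name is hypothetical. A real agent would live in its own .py file inside
# DEFAULT_AGENT_DIR.
class _ExampleAgent(object):
  def start(self):
    pass  # begin capturing trace data
  def collect_result(self):
    pass  # block until the trace has been retrieved
  def expect_trace(self):
    return True  # returning False makes main() skip writing the HTML report
  def get_class_name(self):
    return 'trace-data'  # CSS class used by write_trace_html()
  def get_trace_data(self):
    return ''  # raw trace text embedded between the BEGIN/END TRACE markers
def _example_try_create_agent(options, categories):
  # A real agent module would expose this as try_create_agent() and return
  # None when it does not apply to the given options/categories.
  return _ExampleAgent()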
def main():
options, categories = parse_options(sys.argv)
agents = create_agents(options, categories)
if not agents:
dirs = DEFAULT_AGENT_DIR
if options.agent_dirs:
dirs += ',' + options.agent_dirs
sys.stderr.write('No systrace agent is available in directories |%s|.\n' %
dirs)
sys.exit(1)
try:
update_systrace_trace_viewer = __import__('update_systrace_trace_viewer')
except ImportError:
pass
else:
update_systrace_trace_viewer.update()
for a in agents:
a.start()
for a in agents:
a.collect_result()
if not a.expect_trace():
# Nothing more to do.
return
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
write_trace_html(options.output_file, script_dir, agents)
def read_asset(src_dir, filename):
return open(os.path.join(src_dir, filename)).read()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3d5c8745030e29f443d5aefebb7a4c6d895d30ec | 18e886106d2d84c978e12aa29691f5c0269409ca | /src/biotite/sequence/search.py | 975603f8edc57daec4a299306d9c5f47044335c4 | [
"BSD-3-Clause"
] | permissive | avestamh/biotite | fed37fb4733b4b064cad06b0fccb0193f295c6a2 | d2066e1011bffa7ef83db9fddf6207da8834cc79 | refs/heads/master | 2020-08-06T06:59:21.326038 | 2019-09-27T17:03:06 | 2019-09-27T17:03:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,119 | py | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__author__ = "Patrick Kunzmann"
__all__ = ["find_subsequence", "find_symbol", "find_symbol_first",
"find_symbol_last"]
import numpy as np
def find_subsequence(sequence, query):
"""
Find a subsequence in a sequence.
Parameters
----------
sequence : Sequence
The sequence to find the subsequence in.
query : Sequence
        The potential subsequence. Its alphabet must be extended by
        the `sequence` alphabet.
Returns
-------
match_indices : ndarray
The starting indices in `sequence`, where `query` has been
found. The array is empty if no match has been found.
Raises
------
ValueError
        If the `sequence` alphabet does not extend the `query` alphabet.
Examples
--------
>>> main_seq = NucleotideSequence("ACTGAATGA")
>>> sub_seq = NucleotideSequence("TGA")
>>> print(find_subsequence(main_seq, sub_seq))
[2 6]
"""
if not sequence.get_alphabet().extends(query.get_alphabet()):
raise ValueError("The sequences alphabets are not equal")
match_indices = []
frame_size = len(query)
for i in range(len(sequence) - frame_size + 1):
sub_seq_code = sequence.code[i : i + frame_size]
if np.array_equal(query.code, sub_seq_code):
match_indices.append(i)
return np.array(match_indices)
def find_symbol(sequence, symbol):
"""
Find a symbol in a sequence.
Parameters
----------
sequence : Sequence
The sequence to find the symbol in.
symbol : object
The symbol to be found in `sequence`.
Returns
-------
match_indices : ndarray
The indices in `sequence`, where `symbol` has been found.
"""
code = sequence.get_alphabet().encode(symbol)
return np.where(sequence.code == code)[0]
def find_symbol_first(sequence, symbol):
"""
    Find first occurrence of a symbol in a sequence.
Parameters
----------
sequence : Sequence
The sequence to find the symbol in.
symbol : object
The symbol to be found in `sequence`.
Returns
-------
first_index : int
The first index of `symbol` in `sequence`. If `symbol` is not in
`sequence`, -1 is returned.
"""
match_i = find_symbol(sequence, symbol)
if len(match_i) == 0:
return -1
return np.min(match_i)
def find_symbol_last(sequence, symbol):
"""
    Find last occurrence of a symbol in a sequence.
Parameters
----------
sequence : Sequence
The sequence to find the symbol in.
symbol : object
The symbol to be found in `sequence`.
Returns
-------
    last_index : int
The last index of `symbol` in `sequence`. If `symbol` is not in
`sequence`, -1 is returned.
"""
match_i = find_symbol(sequence, symbol)
if len(match_i) == 0:
return -1
return np.max(match_i)
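# Illustrative usage (not part of the original module), mirroring the
# docstring examples above:
# >>> seq = NucleotideSequence("ACTGAATGA")
# >>> find_symbol_first(seq, "T")
# 2
# >>> find_symbol_last(seq, "T")
# 6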
| [
"[email protected]"
] | |
ba108f845d88a211249cea333a6ee45a47788350 | 0fa517fb36fbffd3ffcc9c1a016f1812a8139102 | /numpy_01_arrays.py | 90971c221829f79b1720b2bb2e27d2a8362b7e47 | [] | no_license | sooyoungkim/flipped-python-modules | 2e7ed4d8590edcb87695d9eb3f5a096f7c362006 | cfa5074b22429340e8586cb49b6a3c25df61e6f3 | refs/heads/master | 2020-03-09T10:57:45.480915 | 2018-04-09T10:01:18 | 2018-04-09T10:01:18 | 128,749,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,497 | py | import numpy as np
########################################################
# Arrays
# - NumPy's central feature is the array object class.
# - Similar to a Python list, but all elements of an array have the same type.
#     - usually float or int
#     - computes over very large numeric data quickly and efficiently.
# - Arrays can be multidimensional.
# - Creating an array : numpy.array(the list, the type)
########################################################
########################################################
# a one-dimensional array
one = np.array([1, 4, 5, 8], float)
print(one)
print(type(one))
# Array elements can be accessed, sliced, and manipulated like a list
one[:2]
one[3]
one[0]
########################################################
########################################################
# a two-dimensional array
two = np.array([[1, 2, 3], [4, 5, 6]], float)
print(two)
two[0,0]
two[0,1]
two[0,2]
two[1,0]
two[1,1]
two[1,2]
two[1, :]
two[:, 2]
two[-1:, -2:]
print(two.shape) # returns a (row size, column size) tuple -> (2,3)
print(two.dtype) # returns the type of the values stored in the array
# float64 : similar to Python's float type
# NumPy stores real numbers in double precision (8 bytes, 64 bits)
len(two) # the number of rows
2 in two # does the value 2 exist in the array?
0 in two
########################################################
########################################################
# a one-dimensional array -> a two-dimensional array
arr = np.array(range(10), float)
print(arr)
# [ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9.]
arr = arr.reshape((5,2)) # turn the 1-D array of 10 values into a new 2-D array
print(arr)
#[[ 0. 1.]
# [ 2. 3.]
# [ 4. 5.]
# [ 6. 7.]
# [ 8. 9.]]
print(arr.shape) # (5,2)
########################################################
########################################################
a = np.array([1, 2, 3], float)
b = a # a reference to the same array
c = a.copy() # separate memory, creates a new array
a[0] = 0 # affects a and b (which references a)
print(a) # [0. 2. 3.]
print(b)
print(c)
a.tolist() # array to a list [0.0, 2.0, 3.0]
list(a) # array to a list [0.0, 2.0, 3.0]
s = a.tostring() # convert to a binary string (i.e., not in human-readable form)
print(s)
print(np.fromstring(s)) # build an array from that binary string
a.fill(0)
print(a)
########################################################
########################################################
# Transposed
a = np.array(range(6), float).reshape((2,3))
print(a)
#[[ 0. 1. 2.]
# [ 3. 4. 5.]]
print(a.transpose())
#[[ 0. 3.]
# [ 1. 4.]
# [ 2. 5.]]
print(a) # unchanged
########################################################
########################################################
# a two-dimensional array -> a one-dimensional array
a = np.array([[1,2,3], [4,5,6]], float)
print(a)
#[[ 1. 2. 3.]
# [ 4. 5. 6.]]
print(a.flatten())
#[ 1. 2. 3. 4. 5. 6.]
print(a) # unchanged
########################################################
########################################################
# a one-dimensional array
# two or more arrays can be joined into one.
a = np.array([1,2], float)
b = np.array([3,4,5,6], float)
c = np.array([7,8,9], float)
print( np.concatenate((a,b,c)) )
# a two-dimensional array
# arrays with two or more dimensions can be joined as well.
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[5, 6], [7,8]], float)
print(np.concatenate((a,b))) # same as axis=0
print(np.concatenate((a,b), axis=0))
#[[ 1. 2.]
# [ 3. 4.]
# [ 5. 6.]
# [ 7. 8.]]
print(np.concatenate((a,b), axis=1))
#[[ 1. 2. 5. 6.]
# [ 3. 4. 7. 8.]]
########################################################
########################################################
# np.newaxis
# increases the number of dimensions of an array.
# convenient for building arrays with the right dimensionality for vector/matrix math
a = np.array([1, 2, 3], float)
print(a[:, np.newaxis])
#[[ 1.]
# [ 2.]
# [ 3.]]
print(a[:, np.newaxis].shape) # 1-D array -> 2-D array, (3,1)
print(a[np.newaxis, :])
# [[ 1. 2. 3.]]
print(a[np.newaxis, :].shape) # (1,3)
########################################################
########################################################
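# Illustrative addition (not in the original tutorial): a common use of
# np.newaxis is broadcasting, e.g. building an outer product.
row = np.array([1, 2, 3], float)
col = row[:, np.newaxis]          # shape (3,1)
print(col * row[np.newaxis, :])   # (3,1) * (1,3) broadcasts to (3,3)
#[[ 1.  2.  3.]
# [ 2.  4.  6.]
# [ 3.  6.  9.]]
########################################################
########################################################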
| [
"[email protected]"
] | |
7dd305bcfab4bc04364c22ec193187aa5f1f6479 | 5390e730defb07660fb6928f5de5970db80b5457 | /docs/conf.py | c8a64641f11e45a5d5e7a0132d35e129d865d969 | [
"BSD-2-Clause"
] | permissive | DrMeers/plata | a732831f001bdc25ab425d6e3c3f70c4d3d303fb | eedfdadda573e9f7b28beddf9ec578c91903355a | refs/heads/master | 2021-01-18T10:32:01.027409 | 2013-06-28T07:42:40 | 2013-06-28T07:42:40 | 771,395 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,605 | py | # -*- coding: utf-8 -*-
#
# Plata documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import PIL.Image
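# Compatibility shim: expose PIL.Image under the legacy top-level module
# name 'Image' so imports written against older PIL releases keep working.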
sys.modules['Image'] = PIL.Image
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../tests/'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Plata'
copyright = u'2010, Feinheit GmbH and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import plata
version = '.'.join(map(str, plata.VERSION))
# The full version, including alpha/beta/rc tags.
release = plata.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme_path = ['_theme']
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Platadoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Plata.tex', u'Plata Documentation',
u'Feinheit GmbH and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| [
"[email protected]"
] | |
df82af94f16968ac4caa95eaf4b55ce03d574d45 | cd1ac53e02cf8879e092bdc4a625e465e241d6e0 | /apps/goods/views.py | b763376cb7524115ff1f1d04353e31927bc680fd | [] | no_license | furuiyang0715/dailyfresh | a150d472237822730f6c372620c5da19da514590 | 98c678c34d5b26eda6b99f7a8d1ca74b6f4399d3 | refs/heads/master | 2022-12-18T17:38:22.200364 | 2020-09-17T08:49:30 | 2020-09-17T08:49:30 | 286,921,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,199 | py | import json
from django.http import JsonResponse
from django.shortcuts import render
from django.views import View
from django_redis import get_redis_connection
from goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner
class IndexView(View):
"""首页"""
def get(self, request):
"""显示首页"""
# (1) 获取商品的种类信息
types = GoodsType.objects.all()
        # (2) fetch the carousel (banner) goods
        # they must be sorted by display order:
        # "index": ascending order (the default)
        # "-index": descending order
goods_banners = IndexGoodsBanner.objects.all().order_by("index")
        # (3) fetch the index-page promotion banners
goods_promotion = IndexPromotionBanner.objects.all().order_by("index")
        # (4) fetch the categorized goods displayed on the index page
goods_type = IndexTypeGoodsBanner.objects.all()
for type in types:
            # the image display rows for this category
            image_goods_lst = IndexTypeGoodsBanner.objects.filter(type=type, display_type=1)
            # the text display rows for this category
text_goods_lst = IndexTypeGoodsBanner.objects.filter(type=type, display_type=0)
type.image_goods_lst = image_goods_lst
type.text_goods_lst = text_goods_lst
        # (5) get the number of items in the user's cart
        '''
        When is a cart record added?
            When the user clicks "add to cart".
        When do we need to read the cart records?
            Whenever the cart data is used, and when the cart page is visited.
        Where are the cart records stored?
            In redis.
        What format are cart records stored in?
            One user's whole cart is saved as a single entry,
            using the redis hash structure:
            "cart_<user id>": {"sku_id1": item count, "sku_id2": item count, ... }
            hlen then returns the number of item entries in the cart.
'''
user = request.user
cart_count = 0
'''
        To add test data in redis:
hmset cart_39 1 3 2 5 3 2
'''
if user.is_authenticated:
# print("用户已登录")
conn = get_redis_connection('default')
cart_key = "cart_{}".format(user.id)
cart_count = conn.hlen(cart_key)
        # assemble the template context
context = {
"types": types,
"goods_banners": goods_banners,
"goods_promotion": goods_promotion,
"goods_type": goods_type,
"cart_count": cart_count,
}
return render(request, "index.html", context)
'''
The index page should be made static (page staticization):
    If a page is visited often but changes rarely, or we can detect exactly
    when it changes, the page-staticization technique can be used.
    Traditional django page rendering:
        query the database --> data + template rendering --> return to the front end
    With page staticization:
        asynchronously regenerate the static page whenever the data changes
        on access, nginx serves the generated static file directly
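An illustrative sketch of the regeneration step (added as an example; the
use of Celery, the task name, the template and the output path are all
assumptions, not part of the original project):
    from celery import shared_task
    from django.template import loader
    @shared_task
    def generate_static_index_html():
        types = GoodsType.objects.all()
        context = {'types': types}  # plus the other queries from IndexView.get
        content = loader.render_to_string('static_index.html', context)
        with open('/usr/share/nginx/html/index.html', 'w') as f:
            f.write(content)
Calling generate_static_index_html.delay() from the admin save hooks keeps
the static file fresh; nginx then serves it without hitting django.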
''' | [
"[email protected]"
] | |
fa0969ec326cfb3e1f13c24ab9b1f289f1196e41 | ccb73097804b2bf6070dba519658ab77bd9a044e | /leetcode/4_二叉树专题/08_二叉树中的最大路径和.py | 38691afc21fef7b84f5b8f4dbaadddf093fe627c | [] | no_license | ryanatgz/data_structure_and_algorithm | 4c43d426534381739891819c4c1e25b500f017ae | 967b0fbb40ae491b552bc3365a481e66324cb6f2 | refs/heads/master | 2022-03-13T01:17:58.015457 | 2019-09-23T06:49:20 | 2019-09-23T06:49:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,681 | py | # encoding: utf-8
"""
@project:data_structure_and_algorithm
@author: Jiang Hui
@language:Python 3.7.2 [GCC 7.3.0] :: Anaconda, Inc. on linux
@time: 2019/8/21 16:01
@desc: Problem 124 (Binary Tree Maximum Path Sum)
"""
"""
The idea is similar to the previous problem. Every node carries a weight, and the
maximum path anchored at a given node falls into three cases:
(1) go from the node into the left subtree (not necessarily all the way down); then max_path = root.val + L
(2) go from the node into the right subtree (not necessarily all the way down); then max_path = root.val + R
(3) do not move -- the path is just this single node; this case applies when both L and R are below 0
Key point:
if the maximum obtained for a branch is below 0, return 0 instead, meaning that
branch's path is ignored when computing the maximum path.
"""
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
ans = 0
def maxPathSum(self, root: TreeNode) -> int:
self.ans = float('-inf')
self.dfs(root)
return self.ans
def dfs(self, root):
if not root:
return 0
left = self.dfs(root.left)
right = self.dfs(root.right)
self.ans = max(self.ans, left + right + root.val)
        # originally: max(0, max(root.val, root.val+left, root.val+right))
        # factoring out root.val gives max(0, root.val + max(0, left, right))
        # since left and right are >= 0, the inner 0 can be dropped: max(0, root.val + max(left, right))
        return max(0, root.val + max(left, right)) # return 0 when the maximum is below 0, matching the three cases above
| [
"[email protected]"
] | |
8f6f439a66a4f9dd1e2b7499a465f033c7001ddb | db8a9a6d2dd4abb762727b2f4570e553ed349c70 | /opengever/task/response_syncer/comment.py | a75a331a2b66d20d65e2b28abc203ae559e24916 | [] | no_license | braegelno5/opengever.core | 75e8e31a6f15385c9f7551b9c671fdc75ba358be | 88d9bec614544de8ca51bf9fcc8cfc0c05449bb5 | refs/heads/master | 2020-05-30T11:53:44.003641 | 2017-06-28T15:32:11 | 2017-06-28T15:32:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | from opengever.task.response_syncer import BaseResponseSyncerReceiver
from opengever.task.response_syncer import BaseResponseSyncerSender
from opengever.task.response_syncer import ResponseSyncerSenderException
class CommentResponseSyncerSender(BaseResponseSyncerSender):
TARGET_SYNC_VIEW_NAME = '@@sync-task-comment-response'
def raise_sync_exception(self, task, transition, text, **kwargs):
raise ResponseSyncerSenderException(
'Could not add comment on task on remote admin unit {} ({})'.format(
task.admin_unit_id,
task.physical_path))
class CommentResponseSyncerReceiver(BaseResponseSyncerReceiver):
"""This view receives a sync-task-comment-response request from another
client after new comments have been added to a successor or predecessor.
"""
| [
"[email protected]"
] | |
356abdb44404654a8daade1fdfc7d21c7b2833a7 | 9835b6949fe4c8018de57aee531dedf1509337cc | /October_2020/oct_09_Serialize_and_Deserialize_BST.py | a0bd3b0f3ad419ec369340ef77c919f66fd8aec5 | [] | no_license | jcai0o0/My_Leetcode_Solutions | f6edea0693d252a99e6507a1724a89763113f8a0 | 3fc909c01c6a345f625c9ab9e0f1584ea5fa8ab4 | refs/heads/master | 2023-01-01T04:08:33.929184 | 2020-10-17T02:01:56 | 2020-10-17T02:01:56 | 289,094,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | class Codec:
def serialize(self, root):
"""
Encodes a tree to a single string.
"""
def postorder(root):
return postorder(root.left) + postorder(root.right) + [root.val] if root else []
return ' '.join(map(str, postorder(root)))
def deserialize(self, data):
"""
Decodes your encoded data to tree.
"""
def helper(lower=float('-inf'), upper=float('inf')):
if not data or data[-1] < lower or data[-1] > upper:
return None
val = data.pop()
root = TreeNode(val)
root.right = helper(val, upper)
root.left = helper(lower, val)
return root
data = [int(x) for x in data.split(' ') if x]
return helper() | [
"[email protected]"
] | |
db09ee14b15a7db7c6252da33646d85dda887742 | f0af28c525a6eac5dbdaf8ffba23dad1138e5b7e | /src/yaml/yaml_prefab.py | f8a39bc82164004356c0e20af7862cdf14451b93 | [
"MIT"
] | permissive | adrianogil/SemanticCode | 740886756d83e569dcfe1aa30d9f4e5db3f394cb | b826b99965f80fc42e654e33ebbebc3aad10f0cd | refs/heads/main | 2021-01-22T18:02:25.065879 | 2020-10-22T20:51:19 | 2020-10-22T20:51:19 | 85,055,494 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,416 | py | import os, sys
__file__ = os.path.normpath(os.path.abspath(__file__))
__path__ = os.path.dirname(__file__)
# print(__path__)
if __path__ not in sys.path:
sys.path.insert(0, __path__)
from yaml_element import YamlElement
class YamlPrefab(YamlElement):
guid = ''
target_id = ''
game_object = None
transform = None
# def __init__(self, gameobject_name, definition_line):
# super(YamlPrefab, self).__init__(gameobject_name, definition_line)
# def print_outline(self):
# object_outline = '<a href="' + str(self.definition_line) + '">Prefab ' + \
# self.gameobject_name + '</a>'
# return object_outline
# Variables used to compound a YamlGameObject
current_go_id = ''
current_go_line = 0
go_instance = None
#Prefab detection
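# NOTE: the block below looks like a fragment lifted from an enclosing parse
# loop -- it references names (last_line, line, i, line_size, parse_data,
# outline_data) that are never defined in this module, so it will not run
# at module level as written.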
if last_line.find('--- !u!') != -1 and line.find("Prefab") != -1:
current_prefab_id = last_line[14:-1]
current_prefab_line = i
found_prefab = True
if found_prefab and line.find("target: {") != -1:
start_prefab_guid = 0
end_prefab_guid = 0
for l in range(20, line_size):
if line[l-6:l].find("guid: ") != -1:
start_prefab_guid = l
if start_prefab_guid > 0 and line[l] == ",":
end_prefab_guid = l
break
current_prefab_guid = line[start_prefab_guid:end_prefab_guid]
# print("found prefab with guid: " + current_prefab_guid)
if current_prefab_guid in parse_data['yaml']['filenames_by_guid']:
prefab_filename = parse_data['yaml']['filenames_by_guid'][current_prefab_guid]
# outline_data.append(YamlPrefab(prefab_filename, current_prefab_line))
found_prefab = False
current_prefab_line = 0
current_prefab_id = ''
current_prefab_guid = ''
def is_start_of_yaml_section(line):
return line.find("GameObject") != -1
def on_yaml_section_start(line, line_number):
current_go_id = line[10:-1]
current_go_line = line_number
go_instance = None
def parse_line(line, file_data):
if line.find("m_Name") != -1:
gameobject_name = line[9:-1]
file_data['gameobject_name_by_id'][current_go_id] = gameobject_name
file_data['row_by_id'][current_go_id] = current_go_line
go_instance = YamlGameObject(gameobject_name, current_go_line)
go_instance.yaml_id = current_go_id
return file_data
def on_yaml_section_finish():
return go_instance | [
"[email protected]"
] | |
a66e50dea2e018898bfbd892032ea056afcd0c30 | f0b741f24ccf8bfe9bd1950425d83b6291d21b10 | /backend/api/v1beta1/python_http_client/kfp_server_api/models/api_run_storage_state.py | 1175a3a1e85da38aa0bcbec22b383fc7873b53ff | [
"Apache-2.0"
] | permissive | kubeflow/pipelines | e678342b8a325559dec0a6e1e484c525fdcc8ce8 | 3fb199658f68e7debf4906d9ce32a9a307e39243 | refs/heads/master | 2023-09-04T11:54:56.449867 | 2023-09-01T19:07:33 | 2023-09-01T19:12:27 | 133,100,880 | 3,434 | 1,675 | Apache-2.0 | 2023-09-14T20:19:06 | 2018-05-12T00:31:47 | Python | UTF-8 | Python | false | false | 2,894 | py | # coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfp_server_api.configuration import Configuration
class ApiRunStorageState(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
AVAILABLE = "STORAGESTATE_AVAILABLE"
ARCHIVED = "STORAGESTATE_ARCHIVED"
allowable_values = [AVAILABLE, ARCHIVED] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""ApiRunStorageState - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiRunStorageState):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ApiRunStorageState):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
0b6ce0e2925acd8b5d04ee7bf7b8649418802315 | ca23b411c8a046e98f64b81f6cba9e47783d2584 | /es_maml/task.py | d84fcbc1384da4dc3099407c9c88cf633642152d | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | pdybczak/google-research | 1fb370a6aa4820a42a5d417a1915687a00613f9c | 0714e9a5a3934d922c0b9dd017943a8e511eb5bc | refs/heads/master | 2023-03-05T23:16:11.246574 | 2021-01-04T11:30:28 | 2021-01-04T11:30:28 | 326,629,357 | 1 | 0 | Apache-2.0 | 2021-02-01T12:39:09 | 2021-01-04T09:17:36 | Jupyter Notebook | UTF-8 | Python | false | false | 7,023 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-doc-return-or-yield,unused-argument,missing-docstring,g-doc-args,line-too-long,invalid-name,pointless-string-statement
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
class Task(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def state_dimensionality(self):
raise NotImplementedError("Abstract method")
@abc.abstractmethod
def action_dimensionality(self):
raise NotImplementedError("Abstract method")
class ClassificationTask(Task):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def generate_samples(self):
raise NotImplementedError("Abstract method")
class RLTask(Task):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def deterministic_start(self):
raise NotImplementedError("Abstract method")
@abc.abstractmethod
def step(self, action):
raise NotImplementedError("Abstract method")
class SinusodialTask(ClassificationTask):
def __init__(self, task_id, sample_num=100, **kwargs):
self.task_id = task_id
np.random.seed(task_id)
self.amp = np.random.uniform(0.1, 5.0)
self.phase = np.random.uniform(0.0, np.pi)
self.sample_num = sample_num
def generate_samples(self):
xs = np.random.uniform(-5.0, 5.0, self.sample_num)
ys = np.array([self.amp * np.sin(x - self.phase) for x in xs])
return xs, ys
def state_dimensionality(self):
return 1
def action_dimensionality(self):
return 1
class NavigationTask2d(RLTask):
def __init__(self, task_id, **kwargs):
self.task_id = task_id
np.random.seed(task_id)
self.goal_pos = np.random.uniform(low=-1.0, high=1.0, size=2)
self.t = 0
def random_start(self):
self.agent_pos = np.array([0.0, 0.0])
return self.agent_pos
def deterministic_start(self):
self.agent_pos = np.array([0.0, 0.0])
return self.agent_pos
def step(self, action):
clipped_action = np.clip(action, a_min=-0.1, a_max=0.1)
self.agent_pos += clipped_action
self.agent_pos = np.clip(self.agent_pos, a_min=-1.0, a_max=1.0)
reward = -1.0 * np.square(np.linalg.norm(self.agent_pos - self.goal_pos))
done = False
if reward >= -0.01:
done = True
return self.agent_pos, reward, done, None
def reset(self):
self.t = 0
return self.deterministic_start()
def restart(self):
return self.reset()
def state_dimensionality(self):
return 2
def action_dimensionality(self):
return 2
class NavigationTask4corner(RLTask):
def __init__(self, task_id, **kwargs):
self.task_id = task_id
corner_int = task_id % 4
corner_id_to_pos = {
0: np.array([2., 2.]),
1: np.array([-2., 2.]),
2: np.array([-2., -2.]),
3: np.array([2., -2.])
}
self.goal_pos = corner_id_to_pos[corner_int]
self.t = 0
def random_start(self):
self.agent_pos = np.array([0.0, 0.0])
return self.agent_pos
def deterministic_start(self):
self.agent_pos = np.array([0.0, 0.0])
return self.agent_pos
def step(self, action):
clipped_action = np.clip(action, a_min=-0.1, a_max=0.1)
self.agent_pos += clipped_action
self.agent_pos = np.clip(self.agent_pos, a_min=-5.0, a_max=5.0)
sq_dist = np.square(np.linalg.norm(self.agent_pos - self.goal_pos))
alive_penalty = -4.0
# reward is only shown if near the corner
reward = alive_penalty + max(0.0, 4.0 - sq_dist)
return self.agent_pos, reward, False, None
def reset(self):
self.t = 0
return self.deterministic_start()
def restart(self):
return self.reset()
def state_dimensionality(self):
return 2
def action_dimensionality(self):
return 2
class NavigationTaskCombo(RLTask):
def __init__(self, task_id, num_subset_goals=2, num_goals=6, **kwargs):
self.task_id = task_id
self.id_to_goal = {}
for i in range(num_goals):
temp_goal = np.sqrt(8.0) * np.array([
np.cos(2 * np.pi * i / float(num_goals)),
np.sin(2 * np.pi * i / float(num_goals))
])
self.id_to_goal[i] = np.copy(temp_goal)
np.random.seed(task_id)
self.goal_ids = np.random.choice(num_goals, num_subset_goals, replace=False)
self.t = 0.0
self.num_subset_goals = num_subset_goals
self.num_goals = num_goals
self.boundary = 4.0
self.visited_goals = []
def random_start(self):
self.t = 0.0
self.visited_goals = []
self.agent_pos = np.array([0.0, 0.0])
self.final_obs = np.concatenate((self.agent_pos, np.array([self.t])))
# return self.final_obs
return self.agent_pos
def deterministic_start(self):
self.t = 0.0
self.visited_goals = []
self.agent_pos = np.array([0.0, 0.0])
self.final_obs = np.concatenate((self.agent_pos, np.array([self.t])))
# return self.final_obs
return self.agent_pos
def step(self, action):
self.t += 1.0
clipped_action = np.clip(action, a_min=-0.1, a_max=0.1)
self.agent_pos += clipped_action
self.agent_pos = np.clip(self.agent_pos, a_min=-5.0, a_max=5.0)
total_reward = 0.0
for g in range(self.num_goals):
if g not in self.goal_ids:
temp_dist = np.square(
np.linalg.norm(self.agent_pos - self.id_to_goal[g]))
# higher penalties
wrong_goal_penalty = 10000.0 * min(0.0, temp_dist - self.boundary)
total_reward += wrong_goal_penalty
else: # g is a correct goal
if g not in self.visited_goals: # if it hasn't been turned off yet
sq_dist = np.square(
np.linalg.norm(self.agent_pos - self.id_to_goal[g]))
alive_penalty = -1.0 * self.boundary
# reward is only shown if near the corner
total_reward += (alive_penalty + max(0.0, self.boundary - sq_dist))
if sq_dist < 0.01:
self.visited_goals.append(g)
# g is a correct goal and was visited, and this goal is turned off
else:
total_reward += 0.0
self.final_obs = np.concatenate((self.agent_pos, np.array([self.t])))
# return self.final_obs, total_reward, False, None
return self.agent_pos, total_reward, False, None
def reset(self):
self.t = 0.0
self.visited_goals = []
return self.deterministic_start()
def restart(self):
return self.reset()
def state_dimensionality(self):
return 2
def action_dimensionality(self):
return 2
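# Illustrative usage sketch (not part of the original module):
#   task = NavigationTask4corner(task_id=0)
#   state = task.reset()
#   for _ in range(5):
#     action = np.random.uniform(low=-0.1, high=0.1, size=2)
#     state, reward, done, _ = task.step(action)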
| [
"[email protected]"
] | |
7ad079b58c3e966fe6c64ed4a5fb161abf1e06f0 | 07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8 | /lib/python3.6/site-packages/matplotlib/tests/test_backend_qt5.py | 5c472c488c0e5f50c53b7fb31b265f732c5bc4ca | [] | no_license | cronos91/ML-exercise | 39c5cd7f94bb90c57450f9a85d40c2f014900ea4 | 3b7afeeb6a7c87384049a9b87cac1fe4c294e415 | refs/heads/master | 2021-05-09T22:02:55.131977 | 2017-12-14T13:50:44 | 2017-12-14T13:50:44 | 118,736,043 | 0 | 0 | null | 2018-01-24T08:30:23 | 2018-01-24T08:30:22 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:3a3557f8e3099b0a26d5723cd3f215b246a4b65b74b89afe5e5b2c08ef15cb85
size 5103
| [
"[email protected]"
] | |
c5bb45f484309c77d1f5e6fcabcdb3a471336fc4 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/VBF/Full2016/HTXS/structure.py | ec651bf0cdec72ef741ab3299277ae508df460c0 | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 8,304 | py | # structure configuration for datacard
#structure = {}
# keys here must match keys in samples.py
#
structure['DY'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Wjets'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Fake'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Fake_em'] = {
'isSignal' : 0,
'isData' : 0,
'removeFromCuts' : [
'hww2l2v_13TeV_me_pm_0j_pt2ge20',
'hww2l2v_13TeV_me_pm_1j_pt2ge20',
'hww2l2v_13TeV_me_mp_0j_pt2ge20',
'hww2l2v_13TeV_me_mp_1j_pt2ge20',
'hww2l2v_13TeV_me_pm_0j_pt2lt20',
'hww2l2v_13TeV_me_pm_1j_pt2lt20',
'hww2l2v_13TeV_me_mp_0j_pt2lt20',
'hww2l2v_13TeV_me_mp_1j_pt2lt20'],
}
structure['Fake_me'] = {
'isSignal' : 0,
'isData' : 0,
'removeFromCuts' : [
'hww2l2v_13TeV_em_pm_0j_pt2ge20',
'hww2l2v_13TeV_em_pm_1j_pt2ge20',
'hww2l2v_13TeV_em_mp_0j_pt2ge20',
'hww2l2v_13TeV_em_mp_1j_pt2ge20',
'hww2l2v_13TeV_em_pm_0j_pt2lt20',
'hww2l2v_13TeV_em_pm_1j_pt2lt20',
'hww2l2v_13TeV_em_mp_0j_pt2lt20',
'hww2l2v_13TeV_em_mp_1j_pt2lt20'],
}
structure['ttbar'] = {
'isSignal' : 0,
'isData' : 0
}
structure['singletop'] = {
'isSignal' : 0,
'isData' : 0
}
structure['top'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WW'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WWewk'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ggWW'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ggWW_Int'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Wg'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Vg'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VgS'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WZgS_L'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WZgS_H'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Zg'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VZ'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WZ'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VVV'] = {
'isSignal' : 0,
'isData' : 0,
'removeFromCuts' : ['hww2l2v_13TeV_dytt_of2j_vbf'],
}
structure['ZZ'] = {
'isSignal' : 0,
'isData' : 0
}
# Signals
structure['ggH_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ggH_fwd_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['qqH_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['qqH_fwd_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_had_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_had_fwd_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_lep_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_lep_fwd_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_had_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_had_fwd_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_lep_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_lep_fwd_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ggZH_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ggZH_lep_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ggZH_lep_fwd_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['bbH_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['bbH_fwd_hww'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ttH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['ttH_fwd_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['ggH_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ggH_fwd_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['qqH_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['qqH_fwd_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_had_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_had_fwd_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_lep_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_lep_fwd_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_had_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_had_fwd_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_lep_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_lep_fwd_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
# data
structure['DATA'] = {
'isSignal' : 0,
'isData' : 1
}
| [
"[email protected]"
] | |
72e60cc886839197cd64e36eae449aa89d95327f | ae711d6802fa43efdc25ac2357789a6d4e735a92 | /ipypublish/export_plugins/__init__.py | 45b81c1aeab5a61e4575c48ece103b5a3a64ad42 | [
"BSD-3-Clause"
] | permissive | stonebig/ipypublish | 8c946d0b3788874d348379f0485d9a4e4807be54 | 218bb3c557b09314ac91910d8ee79d312d0f481c | refs/heads/master | 2021-01-16T19:07:09.729341 | 2017-08-08T22:01:45 | 2017-08-08T22:01:45 | 100,138,691 | 0 | 0 | null | 2017-08-12T21:29:17 | 2017-08-12T21:29:17 | null | UTF-8 | Python | false | false | 83 | py | #!/usr/bin/env python
#from ipypublish.export_plugins import latex_ipypublish_main | [
"[email protected]"
] | |
8fff01e105f0aa6711b1988eac61df3ec5a04400 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_142/418.py | e273a5f2ab13e3c6917814dd6401043855e717f1 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py | import math
#YOLOSWAG
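# Run-length encode a string: "aabccc" -> [['a', 2], ['b', 1], ['c', 3]].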
def getRepeats(string):
last = ""
repeats = []
cur = -1
for c in string:
if c == last:
repeats[cur][1] += 1
else:
repeats.append([c,1])
cur += 1
last = c
return repeats
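# Minimum total insertions/deletions to make all strings equal: the run
# characters must match position-by-position across every string; for each
# run position, pick the target length that minimizes the summed absolute
# differences (the minimum over the observed run lengths achieves this).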
def getMoves(checkLengths, repeats):
moves = 0
for x in range(0,checkLengths):
bestMove = -1
for a in repeats:
checkMoves = 0
charCompare = a[x][0]
for b in repeats:
if b[x][0] == charCompare:
checkMoves += abs(a[x][1] - b[x][1])
else:
return -1
if bestMove == -1 or bestMove > checkMoves:
bestMove = checkMoves
moves += bestMove
return moves
inputs = open("in.txt").readlines()
output = open('out.txt', 'w')
t = int(inputs[0])
r = 1
for i in range(1, t + 1):
#r = (i - 1) * 3 + 1
n = int(inputs[r])
r += 1
repeats = []
for j in range(0, n):
repeats.append(getRepeats(inputs[r].rstrip()))
r+=1
moves = 0
checkLengths = -1
for re in repeats:
if checkLengths == -1:
checkLengths = len(re)
if len(re) != checkLengths:
checkLengths = -1
break
if checkLengths == -1:
answer = "Case #%d: Fegla Won\n"%(i)
else:
moves = getMoves(checkLengths, repeats)
if(moves == -1):
answer = "Case #%d: Fegla Won\n"%(i)
else:
answer = "Case #%d: %d\n"%(i,moves)
print(answer)
output.write(answer)
output.close()
| [
"[email protected]"
] | |
d8564359bfe77459d7cad265911cb5ead91e4c39 | c61c9bedba1968bfaf571ac3996b696fc35890a6 | /Chapter3/3-4.py | 11467a4eb3c9f06b6bdd059979b31d2f5dca20e8 | [] | no_license | ArunRamachandran/ThinkPython-Solutions | 497b3dbdeba1c64924fe1d9aa24204a9ca552c5b | 1a0872efd169e5d39b25134960168e3f09ffdc99 | refs/heads/master | 2020-04-01T10:23:20.255132 | 2014-11-07T17:04:52 | 2014-11-07T17:04:52 | 25,806,318 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | # A fn. object is a value you can assign to a variable or pass as an argument
# 'do_twice' is a fn that takes a fn object as an argument and calls it twice
#
def print_spam():
print "spam"
def do_twice(f):
f()
f()
do_twice(print_spam)
# 2. Modify do_twice so that it takes two arguments, a fn object and a value,
# and calls the fn twice, passing the value as an argument.
word = raw_input("Enter a word..\n")
def print_spam(word):
print word
def do_twice(f,word):
f(word)
f(word)
do_twice(print_spam,word)
#3. Write a more general version of print_spam, called print_twice, that takes
# a string as a parameter and prints it twice.
word = raw_input("Enter a string\n");
def print_twice(word):
print word
print word
print_twice(word)
#4. Use the modified version of do_twice to call print_twice, passing 'spam'
# as an argument.
print "\n"
def do_twice(word):
print_twice(word)
print_twice(word)
def print_twice(word):
print word
s = "hello"
do_twice(s)
# 5.Define a new fn. called do_four(), that takes a fn object and a value
# and calls the fn four times, passing the value as a parameter. There
# should be only two statements in the body of this fn, not four
obj = raw_input("Give a string .\n")
def f(obj):
print obj
def do_twice(f,obj):
f(obj)
f(obj)
def do_four(f,obj):
do_twice(f,obj)
do_twice(f,obj)
do_four(f,obj)
| [
"[email protected]"
] | |
1405f80452859a40af6dcef9d1d18726e19f09e1 | 300eb733976a31d73a68ddf20d986ba6aceb6ef5 | /ewoexit2708/routes.py | 1f90724f91805ba14797141cffb48edcf96512fa | [
"MIT"
] | permissive | ajthummar/jesse_strategies | f168ae455970bd91845807dd7b0346e77471db09 | 5d23b44f97006e6cecf8519a3951accbfde09fc7 | refs/heads/master | 2023-08-12T21:35:22.458840 | 2021-10-18T13:26:12 | 2021-10-18T13:26:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,595 | py | # Some pairs have been disabled due to a lack of candle data.
# You can restore them to test within recent months.
routes = [
('FTX Futures', 'ETC-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
# ('FTX Futures', 'LUNA-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'FIL-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
# ('FTX Futures', 'FTM-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'ETH-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'DOT-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'XTZ-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'BNB-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'NEO-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'SOL-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'LINK-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'XLM-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'MATIC-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'TRX-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'BTC-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'AAVE-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'ALGO-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'ADA-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'ATOM-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'XRP-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'LTC-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
# ('FTX Futures', '1INCH-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
('FTX Futures', 'BCH-USD', '15m', 'ewoexit2708', '86lZ6^AX3'),
]
extra_candles = [
('FTX Futures', 'ETC-USD', '1h'),
# ('FTX Futures', 'LUNA-USD', '1h'),
('FTX Futures', 'FIL-USD', '1h'),
# ('FTX Futures', 'FTM-USD', '1h'),
('FTX Futures', 'ETH-USD', '1h'),
('FTX Futures', 'DOT-USD', '1h'),
('FTX Futures', 'XTZ-USD', '1h'),
('FTX Futures', 'BNB-USD', '1h'),
('FTX Futures', 'NEO-USD', '1h'),
('FTX Futures', 'SOL-USD', '1h'),
('FTX Futures', 'LINK-USD', '1h'),
('FTX Futures', 'XLM-USD', '1h'),
('FTX Futures', 'MATIC-USD', '1h'),
('FTX Futures', 'TRX-USD', '1h'),
('FTX Futures', 'BTC-USD', '1h'),
('FTX Futures', 'AAVE-USD', '1h'),
('FTX Futures', 'ALGO-USD', '1h'),
('FTX Futures', 'ADA-USD', '1h'),
('FTX Futures', 'ATOM-USD', '1h'),
('FTX Futures', 'XRP-USD', '1h'),
('FTX Futures', 'LTC-USD', '1h'),
# ('FTX Futures', '1INCH-USD', '1h'),
('FTX Futures', 'BCH-USD', '1h'),
]
| [
"[email protected]"
] | |
e976558a42813fa84f1375258c9478e7ecefb635 | 1a937b899af949d23e667782a7360b9de1634456 | /SoftUni/Exam/Problem-2-Bunny-Wars/bunny_wars.py | 526335b97d29876326703e478ca531c3d3c2278a | [] | no_license | stanislavkozlovski/data_structures_feb_2016 | c498df6ea7cb65d135057a300e0d7e6106713722 | adedac3349df249fe056bc10c11b0b51c49e24bb | refs/heads/master | 2021-07-06T17:37:18.117104 | 2017-09-30T19:01:51 | 2017-09-30T19:01:51 | 75,526,414 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,730 | py | import datrie
import string
from sortedcontainers import SortedDict, SortedSet
class Bunny:
def __init__(self, name, teamid, room):
self.name = name
self.reversed_name = ''.join(reversed(name))
self.room = room
self.health = 100
self.score = 0
self.team = teamid
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __gt__(self, other):
return self.name > other.name
def __lt__(self, other):
return self.name < other.name
def __str__(self):
return self.name
class Room:
def __init__(self, id):
self.id = id
self.bunny_count = 0
self.bunnies = dict()
def __hash__(self):
return hash(id)
def __eq__(self, other):
return self.id == other.id
def __gt__(self, other):
return self.id > other.id
def __lt__(self, other):
return self.id < other.id
def __len__(self):
return self.bunny_count
def detonate(self, bunny):
"""
Detonate bunnyName – detonates the bunny, causing all bunnies from other teams in the same room
to suffer 30 damage to their health (their health is reduced by 30).
If a bunny with the given name does not exist, the command should throw an exception.
If a bunny falls to 0 or less health as a result of the detonation, it should be removed from the game.
For each removed enemy bunny, the detonated bunny should gain +1 score.
"""
score = 0
dead_bunnies = []
orig_bunny_team_id = bunny.team
for team_id in self.bunnies.keys():
# go through each bunny that's not from the original bunny's team
if team_id != orig_bunny_team_id:
for enemy_bunny in self.bunnies[team_id].values():
enemy_bunny.health -= 30
if enemy_bunny.health <= 0:
dead_bunnies.append(enemy_bunny)
score += 1
        for dead_bunny in dead_bunnies: # delete each dead bunny
            del self.bunnies[dead_bunny.team][dead_bunny.name]
            self.bunny_count -= 1
bunny.score += score
return dead_bunnies # return the dead bunnies to be deleted from other collections
def add_bunny(self, bunny):
""" Adds the bunny to the room"""
if bunny.team not in self.bunnies:
self.bunnies[bunny.team] = dict()
self.bunnies[bunny.team][bunny.name] = bunny
self.bunny_count += 1
return bunny
def move_bunny_in(self, bunny: Bunny):
if bunny.team not in self.bunnies:
self.bunnies[bunny.team] = dict()
self.bunnies[bunny.team][bunny.name] = bunny
self.bunny_count += 1
    def remove_bunny(self, bunny):
        self.bunny_count -= 1
        del self.bunnies[bunny.team][bunny.name]
class BunnyWars:
def __init__(self):
self.rooms_by_idx = SortedSet() # integer ID only
self.rooms = SortedDict() # key: id, value: room
self.bunnies_by_team = {} # key: team id, value: SortedSet(key=bunny.reversed_name) of Bunny objects
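        # Suffix queries are served by a prefix trie over *reversed* names:
        # looking up reversed(suffix) as a prefix finds every bunny whose
        # name ends with that suffix, already in the required order.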
self.bunnies_by_suffix = datrie.Trie(string.ascii_letters + ''.join(str(part) for part in range(0,10)))
self.bunny_names = {}
def next_bunny(self, bunny_name):
self._move_bunny(bunny_name)
def prev_bunny(self, bunny_name):
self._move_bunny(bunny_name, prev=True)
def bunny_count(self):
return len(self.bunny_names)
def room_count(self):
return len(self.rooms)
def list_bunnies_by_team(self, team_id):
"""
ListBunniesByTeam teamId - returns all bunnies from the specified team in (sorted by name in descending order).
"""
return reversed(self.bunnies_by_team[team_id])
def list_bunnies_by_suffix(self, suffix):
"""
ListBunniesBySuffix suffix -
returns all bunnies ending with the specified suffix (sorted by the ASCII code of the reversed name
in ascending order as a first criteria and by length in ascending order as a second criteria).
Example Tpen < apen < aapen < bapen < bpen.
"""
return self.bunnies_by_suffix.values(''.join(reversed(suffix)))
def detonate(self, bunny_name):
if bunny_name not in self.bunny_names:
raise Exception('Bunny does not exist!')
bunny = self.bunny_names[bunny_name]
room = self.rooms[bunny.room]
dead_bunnies = room.detonate(bunny) # detonate the bunny and get all the bunnies that have died
for dead_bunny in dead_bunnies:
self._delete_bunny(dead_bunny)
def add_room(self, id):
"""
Add roomId – adds a room to the structure.
Rooms have unique ids.
Rooms should be situated according to their id in ascending order.
If a room with the given Id exists the command should throw an exception.
"""
if id in self.rooms:
raise Exception('Room with id {id} is already registered!'.format(id=id))
self.rooms_by_idx.add(id)
self.rooms[id] = Room(id)
def add_bunny(self, bunny_name, team_id, room_id):
if room_id not in self.rooms or team_id > 4 or team_id < 0:
raise Exception('Invalid room/team id!')
if bunny_name in self.bunny_names:
raise Exception('A bunny with the given name already exists!')
bunny_obj = Bunny(name=bunny_name, teamid=team_id, room=room_id)
# 1. Add to the room
self.rooms[room_id].add_bunny(bunny_obj)
# 2. Add to overall bunnies
self.bunny_names[bunny_name] = bunny_obj
# 3. Add to suffixes
self.bunnies_by_suffix[bunny_obj.reversed_name] = bunny_obj
# 4. Add to bunnies by team
if bunny_obj.team not in self.bunnies_by_team:
self.bunnies_by_team[bunny_obj.team] = SortedSet()
self.bunnies_by_team[bunny_obj.team].add(bunny_obj)
def remove_room(self, room_id):
if room_id not in self.rooms:
raise Exception('A room with the id {id} does not exist!'.format(id=room_id))
room = self.rooms[room_id]
del self.rooms[room_id]
self.rooms_by_idx.remove(room_id)
# delete every bunny there
for bunnies_from_team in room.bunnies.values():
for bunny in bunnies_from_team.values():
self._delete_bunny(bunny)
def _move_bunny(self, bunny_name, prev=False):
if bunny_name not in self.bunny_names:
raise Exception()
bunny = self.bunny_names[bunny_name]
old_room_id = bunny.room
old_room = self.rooms[old_room_id]
old_room_index = self.rooms_by_idx.index(old_room_id)
if prev:
next_room_index = old_room_index - 1
else:
next_room_index = old_room_index + 1
        if next_room_index >= len(self.rooms_by_idx) or next_room_index < 0: # is out of bounds
            next_room_index = len(self.rooms_by_idx) - 1 if prev else 0 # wrap around to the other end
# get the new room id and assign it to the bunny
new_room_id = self.rooms_by_idx[next_room_index]
bunny.room = new_room_id
new_room = self.rooms[new_room_id]
# remove the bunny from the old room and move it to the new one
old_room.remove_bunny(bunny)
new_room.move_bunny_in(bunny)
def _delete_bunny(self, bunny: Bunny):
# 1.Remove from overall bunnies
del self.bunny_names[bunny.name]
# 2.Remove from suffixes
del self.bunnies_by_suffix[bunny.reversed_name]
# 3.Remove from bunnies by team
self.bunnies_by_team[bunny.team].remove(bunny)
def main_loop():
""" Take commands from the bunny wars commander! """
wars = BunnyWars()
while True:
command = input()
args = command.split()
if command.startswith('Add'):
# add commands
if len(args) > 2: # add a bunny
bunny_name = args[1]
team_id = int(args[2])
room_id = int(args[3])
wars.add_bunny(bunny_name, team_id, room_id)
else: # add a room
room_id = int(args[1])
wars.add_room(room_id)
elif command == 'BunnyCount':
print('The amount of bunnies is: {}'.format(wars.bunny_count()))
elif command == 'RoomCount':
print('The amount of rooms is: {}'.format(wars.room_count()))
elif command.startswith('Remove'):
# remove a room
room_id = int(args[1])
wars.remove_room(room_id)
elif command.startswith('Next'):
# move the bunny to the next room
bunny_name = args[1]
wars.next_bunny(bunny_name)
elif command.startswith('Previous'):
# move the bunny to the previous room
bunny_name = args[1]
wars.prev_bunny(bunny_name)
elif command.startswith('Detonate'):
# detonates a bunny
bunny_name = args[1]
wars.detonate(bunny_name)
elif command.startswith('ListBunniesByTeam'):
# lists the bunnies from the given team
team_id = int(args[1])
print('\n'.join([str(bun) for bun in wars.list_bunnies_by_team(team_id)]))
elif command.startswith('ListBunniesBySuffix'):
# lists the bunnies that end in the given suffix
suffix = args[1]
print('\n'.join([str(bun) for bun in wars.list_bunnies_by_suffix(suffix)]))
def main():
main_loop()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4805be7815446bf43c70387ee55ae67dd9eb421b | cffe83637b3965ad27f5a679e187bfaf46afa690 | /.stversions/cookbook/magic_browser/cookbook/cookbook/.stversions/blender/menus/utilities/DeleteDefaults~20201019-122406~20210212-114808.py | 0e6df3d29eb9c86b1351c57ea11ff99386143025 | [] | no_license | gmolinart/LC_MASTER | da768a592821fe4dc55bdf693291df3409c3f035 | 2f17eaf5c4c7f70be0c0b5976b479002da4e7d52 | refs/heads/master | 2023-04-29T07:38:24.653457 | 2021-05-17T18:42:34 | 2021-05-17T18:42:34 | 368,287,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | import bpy
# from cgl.plugins.blender import lumbermill as lm
class DeleteDefaults(bpy.types.Operator):
"""
This class is required to register a button in blender.
"""
bl_idname = 'object.delete_defaults'
bl_label = 'Delete Defaults'
def execute(self, context):
run()
return {'FINISHED'}
def run():
"""
This run statement is what's executed when your button is pressed in blender.
:return:
"""
for object in bpy.data.objects:
if 'DEFAULT' in object.name:
bpy.data.objects.remove(object)
for collection in bpy.data.collections:
if 'DEFAULT' in collection.name:
bpy.data.collections.remove(collection)
print('Defaults deleted')
| [
"[email protected]"
] | |
8a41096df1f6fffbe6e574d02dcbbe0cb1336a33 | 71acb7214efd91c0d327f6d8958e1798eadb4401 | /locations/spiders/fast_stop_us.py | 52aba2f44fcbdd93fc49d2c9ccbf9017896e264b | [
"CC0-1.0",
"MIT"
] | permissive | alltheplaces/alltheplaces | 21b9f8b4ace1352e52ae7b8f8825a930d2cb033e | 1bcbb55cfcf06f2c714465570711f6e83f205c22 | refs/heads/master | 2023-08-30T19:45:35.098658 | 2023-08-30T17:51:54 | 2023-08-30T17:51:54 | 61,166,935 | 453 | 176 | NOASSERTION | 2023-09-14T17:16:40 | 2016-06-15T01:09:18 | Python | UTF-8 | Python | false | false | 1,286 | py | import re
from urllib.parse import urljoin
import chompjs
from scrapy import Spider
from locations.linked_data_parser import LinkedDataParser
from locations.microdata_parser import convert_item, get_object
class FastStopUSSpider(Spider):
name = "fast_stop_us"
item_attributes = {"brand": "FAST STOP", "brand_wikidata": "Q116734101"}
start_urls = ["https://www.efaststop.com/store-locator"]
def parse(self, response, **kwargs):
coords_map = {}
if m := re.search(r"init_map\(.+, (\[.+\]), (\[.+\])\);", response.text):
coords, popup = m.groups()
lat_lon = re.compile(r"LatLng\((-?\d+\.\d+), (-?\d+\.\d+)\)")
for location in chompjs.parse_js_object(coords):
if ll := re.search(lat_lon, location["position"]):
coords_map[location["title"]] = ll.groups()
for location in response.xpath('//section[@itemtype="http://schema.org/GasStation"]'):
ld = convert_item(get_object(location.root))
item = LinkedDataParser.parse_ld(ld)
item["ref"] = item["website"] = urljoin(response.url, location.xpath(".//a/@href").get())
if ll := coords_map.get(item["name"]):
item["lat"], item["lon"] = ll
yield item
| [
"[email protected]"
] | |
61d4e1404221084feb930c2969ad4654b6919777 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/lethain-lifeflow/allPythonContent.py | e407c166df6bf7e187658a73694e7fea0de1ca30 | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182,365 | py | __FILENAME__ = admin
from django.contrib import admin
from lifeflow.models import *
class CommentAdmin(admin.ModelAdmin):
list_display = ('entry', 'name', 'email', 'webpage', 'date')
search_fields = ['name', 'email','body']
admin.site.register(Comment, CommentAdmin)
class AuthorAdmin(admin.ModelAdmin):
list_display = ('name', 'link')
search_fields = ['name']
prepopulated_fields = {'slug': ('name',)}
admin.site.register(Author, AuthorAdmin)
class EntryAdmin(admin.ModelAdmin):
list_display = ('title', 'pub_date')
search_fields = ['title', 'summary', 'body']
prepopulated_fields = {'slug': ('title',)}
filter_horizontal = ('flows','tags','series','resources','authors')
fieldsets = (
(None, {'fields' : ('title', 'slug', 'pub_date',)}),
('Content', {'fields': ('summary', 'body',)}),
('Options', {'fields': ('use_markdown', 'is_translation', 'send_ping', 'allow_comments', ), 'classes': 'collapse'}),
('Authors', {'fields' : ('authors',), 'classes': 'collapse'}),
('Resources', {'fields' : ('resources',), 'classes': 'collapse'}),
('Series', {'fields': ('series',), 'classes': 'collapse'}),
('Organization', {'fields': ('flows', 'tags',),}),
)
admin.site.register(Entry, EntryAdmin)
class ProjectAdmin(admin.ModelAdmin):
list_display = ('title', 'language', 'license', 'size',)
search_fields = ['title', 'summary', 'body']
prepopulated_fields = {'slug': ('title',)}
filter_horizontal = ('resources',)
fieldsets = (
(None, {'fields' : ('title', 'slug', 'size', 'language', 'license', 'use_markdown',)} ),
('Content', {'fields': ('summary', 'body', 'resources')} ),
)
admin.site.register(Project, ProjectAdmin)
# Custom admins required due to slug field
class SeriesAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',)}
admin.site.register(Series, SeriesAdmin)
class TagAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',)}
admin.site.register(Tag, TagAdmin)
class FlowAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',)}
admin.site.register(Flow, FlowAdmin)
class LanguageAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',)}
admin.site.register(Language, LanguageAdmin)
# Simple admin interfaces
admin.site.register(Resource)
admin.site.register(RecommendedSite)
admin.site.register(SiteToNotify)
admin.site.register(Translation)
########NEW FILE########
__FILENAME__ = akismet
#!/usr/bin/python
__version__ = "0.3"
__date__ = "2005-12-01"
__author__ = "David Lynch (kemayo AT Google's mail service DOT com)"
__copyright__ = "Copyright 2005, David Lynch"
__license__ = "Python"
__history__ = """
0.3 - 20051205 - Cleaned up __post.
0.2 - 20051201 - Added documentation, and tweaked the circumstances where an error
will be thrown.
0.1 - 20051201 - Initial release. Everything pretty much works. Probably.
"""
import httplib
from urllib import urlencode
USERAGENT = ""
AKISMET_URL = "rest.akismet.com"
AKISMET_PORT = 80
class AkismetError(Exception):
def __init__(self, response, statuscode):
self.response = response
self.statuscode = statuscode
    def __str__(self):
        return "%s (HTTP status %s)" % (self.response, self.statuscode)
def __post(request, host, path, port = 80):
connection = httplib.HTTPConnection(host, port)
connection.request("POST", path, request,
{"User-Agent":"%s | %s/%s" % (USERAGENT,"Akistmet.py", __version__),
"Content-type":"application/x-www-form-urlencoded"})
response = connection.getresponse()
return response.read(), response.status
def verify_key(key, blog):
"""Find out whether a given WordPress.com API key is valid.
Required parameters:
key: A WordPress.com API key.
blog: URL of the front page of the site comments will be submitted to.
Returns True if a valid key, False if invalid.
"""
response, status = __post("key=%s&blog=%s" % (key,blog), AKISMET_URL, "/1.1/verify-key", AKISMET_PORT)
if response == "valid":
return True
elif response == "invalid":
return False
else:
raise AkismetError(response, status)
def comment_check(key, blog, user_ip, user_agent, **other):
"""Submit a comment to find out whether Akismet thinks that it's spam.
Required parameters:
key: A valid WordPress.com API key, as tested with verify_key().
blog: URL of the front page of the site the comment will appear on.
user_ip: IP address of the being which submitted the comment.
user_agent: User agent reported by said being.
Suggested "other" keys: "permalink", "referrer", "comment_type", "comment_author",
"comment_author_email", "comment_author_url", "comment_content", and any other HTTP
headers sent from the client.
More detail on what should be submitted is available at:
http://akismet.com/development/api/
Returns True if spam, False if ham. Throws an AkismetError if the server says
anything unexpected.
"""
request = {'blog': blog, 'user_ip': user_ip, 'user_agent': user_agent}
request.update(other)
response, status = __post(urlencode(request), "%s.%s" % (key,AKISMET_URL), "/1.1/comment-check", AKISMET_PORT)
if response == "true":
return True
elif response == "false":
return False
else:
raise AkismetError(response, status)
def submit_spam(key, blog, user_ip, user_agent, **other):
"""Report a false negative to Akismet.
Same arguments as comment_check.
Doesn't return anything. Throws an AkismetError if the server says anything.
"""
request = {'blog': blog, 'user_ip': user_ip, 'user_agent': user_agent}
request.update(other)
response, status = __post(urlencode(request), "%s.%s" % (key,AKISMET_URL), "/1.1/submit-spam", AKISMET_PORT)
if status != 200 or response != "":
raise AkismetError(response, status)
def submit_ham(key, blog, user_ip, user_agent, **other):
"""Report a false positive to Akismet.
Same arguments as comment_check.
Doesn't return anything. Throws an AkismetError if the server says anything.
"""
request = {'blog': blog, 'user_ip': user_ip, 'user_agent': user_agent}
request.update(other)
response, status = __post(urlencode(request), "%s.%s" % (key,AKISMET_URL), "/1.1/submit-ham", AKISMET_PORT)
if status != 200 or response != "":
raise AkismetError(response, status)
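# Usage sketch (illustrative only; not part of the original module). The
# key, blog URL, IP address and user agent are caller-supplied placeholders,
# not real credentials.
def _example_usage(key, blog):
    if verify_key(key, blog):
        return comment_check(key, blog, "127.0.0.1", "Mozilla/5.0",
                             comment_author="Alice",
                             comment_content="Nice post!")
    return None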
########NEW FILE########
__FILENAME__ = captcha
__license__ = """Copyright (c) 2007 Will R Larson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
__author__ = "Will R Larson"
__email__ = "lethain at google's email service"
__description__ = "A library for generating simple text-only captchas."
__api__ = """
The API for a captcha is two methods:
check(answer) --> True or False
question --> either a human or html formatted string
that represents the question being asked
"""
__todo__ = """
MissingColorCaptcha
Optionally encode questions using HTML character entitites
"""
import random
class BaseMathCaptcha(object):
def __init__(self, qty=2, min=10, max=30, str_ops=False):
self._str_ops = str_ops
self._answer = None
self._question = None
self.numbers = []
for i in xrange(qty):
num = random.randint(min, max)
self.numbers.append(num)
def check(self, answer):
if self._answer is None:
self._calculate_answer()
if int(answer) == self._answer:
return True
else:
return False
def _calculate_answer(self):
op = self._operation()
self._answer = reduce(op, self.numbers)
def question(self):
if self._question is None:
str_numbers = []
for number in self.numbers:
str_numbers.append(str(number))
op_string = self._op_string()
self._question = op_string.join(str_numbers)
return self._question
class AdditionCaptcha(BaseMathCaptcha):
'Captcha for addition problems.'
def _operation(self):
return lambda a, b: a + b
def _op_string(self):
if self._str_ops is True:
return " plus "
else:
return " + "
class SubtractionCaptcha(BaseMathCaptcha):
'Captcha for subtraction problems.'
def _operation(self):
return lambda a, b: a - b
def _op_string(self):
if self._str_ops is True:
return " minus "
else:
return " - "
class MultiplicationCaptcha(BaseMathCaptcha):
'Captcha for multiplication problems.'
def _operation(self):
return lambda a, b: a * b
def _op_string(self):
if self._str_ops is True:
return " times "
else:
return " * "
class MissingNumberCaptcha(object):
def __init__(self, min=1, max=4):
if min == max:
self._question = ""
self.missing = min
else:
self.missing = random.randint(min, max)
numbers = range(min-1, max+2)
if len(numbers) > 0:
numbers.remove(self.missing)
numbers = map(lambda x : str(x), numbers)
self._question = " ".join(numbers)
else:
self._question = ""
def check(self, answer):
if int(answer) == self.missing:
return True
else:
return False
def question(self):
return self._question
def __str__(self):
return self.question()
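# Usage sketch (illustrative only; not part of the original module): every
# captcha here exposes question() for display and check(answer) for
# validation, as described in the __api__ note above.
def _example_captcha_round(answer):
    c = AdditionCaptcha(qty=2, min=1, max=9)
    prompt = c.question()  # e.g. "3 + 7"
    return prompt, c.check(answer)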
########NEW FILE########
__FILENAME__ = tests
# tests for captcha.py
import unittest, sys
from captcha import *
class TestCaptcha(unittest.TestCase):
def test_AdditionCaptcha(self):
c = AdditionCaptcha(qty=5, min=5, max=5)
self.assertEqual(c.check(25), True)
self.assertEqual(c.check(24), False)
self.assertEqual(c.check(26), False)
qst = "5 + 5 + 5 + 5 + 5"
self.assertEqual(c.question(), qst)
c = AdditionCaptcha(qty=20, min=10, max=1000)
answer = reduce(lambda a,b : a + b, c.numbers)
self.assertEqual(c.check(answer), True)
c = AdditionCaptcha(qty=2, min=10, max=10, str_ops=True)
self.assertEqual(c.check(20), True)
self.assertEqual(c.question(), "10 plus 10")
def test_SubtractionCaptcha(self):
c = SubtractionCaptcha(qty=2, min=5, max=5)
self.assertEqual(c.check(0), True)
self.assertEqual(c.question(), "5 - 5")
c = SubtractionCaptcha(qty=10, min=10, max=1000)
answer = reduce(lambda a,b: a - b, c.numbers)
self.assertEqual(c.check(answer), True)
c = SubtractionCaptcha(qty=2, min=10, max=10, str_ops=True)
self.assertEqual(c.check(0), True)
self.assertEqual(c.question(), "10 minus 10")
def test_MultiplicationCaptcha(self):
c = MultiplicationCaptcha(qty=3, min=10, max=10)
self.assertEqual(c.check(1000), True)
self.assertEqual(c.question(), "10 * 10 * 10")
c = MultiplicationCaptcha(qty=10, min=10, max=1000)
answer = reduce(lambda a,b : a * b , c.numbers)
self.assertEqual(c.check(answer), True)
c = MultiplicationCaptcha(qty=2, min=10, max=10, str_ops=True)
self.assertEqual(c.check(100), True)
self.assertEqual(c.question(), "10 times 10")
def test_MissingNumberCaptcha(self):
c = MissingNumberCaptcha(min=5,max=5)
self.assertEqual(c.check(5), True)
def main(argv=None):
if argv is None: argv = sys.argv
unittest.main(argv=["tests.py"])
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = context
from lifeflow.models import Entry, Flow, RecommendedSite, Author, Flow, Language
from django.contrib.sites.models import Site
from django.conf import settings
def blog(request):
def make_slug(str):
return str.lower().replace(" ","-")
recent = Entry.current.all()[:5]
random = Entry.current.all().order_by('?')[:5]
blog_roll = RecommendedSite.objects.all()
flows = Flow.objects.all()
site = Site.objects.get(pk=settings.SITE_ID)
analytics_id = getattr(settings, 'LIFEFLOW_GOOGLE_ANALYTICS_ID', None)
use_projects = getattr(settings, 'LIFEFLOW_USE_PROJECTS', True)
keywords = getattr(settings, 'LIFEFLOW_KEYWORDS', "blog")
description = getattr(settings, 'LIFEFLOW_DESCRIPTION', "blog")
author = getattr(settings, 'LIFEFLOW_AUTHOR_NAME', None)
template_author = getattr(settings, 'LIFEFLOW_TEMPLATE_AUTHOR', "Will Larson")
template_author_url = getattr(settings, 'LIFEFLOW_TEMPLATE_AUTHOR_URL', "http://www.lethain.com/")
if author is None:
try:
author = Author.objects.get(pk=1).name
except:
author = "Anonymous"
author_slug = make_slug(author)
blog_name = getattr(settings, 'LIFEFLOW_BLOG_NAME', "Unconfigured LifeFlow")
custom_css = getattr(settings, 'LIFEFLOW_CUSTOM_CSS', None)
custom_js_header = getattr(settings, 'LIFEFLOW_CUSTOM_JS_HEADER', None)
custom_js_footer = getattr(settings, 'LIFEFLOW_CUSTOM_JS_FOOTER', None)
return {
'blog_roll' : blog_roll,
'lifeflow_google_analytics_id':analytics_id,
'lifeflow_blog_name':blog_name,
'lifeflow_custom_css':custom_css,
'lifeflow_custom_js_header':custom_js_header,
'lifeflow_custom_js_footer':custom_js_footer,
'lifeflow_flows':flows,
'lifeflow_keywords':keywords,
'lifeflow_description':description,
'lifeflow_author':author,
'lifeflow_author_slug':author_slug,
'lifeflow_use_projects':use_projects,
'lifeflow_template_author':template_author,
'lifeflow_template_author_url':template_author_url,
'recent_entries' : recent,
'random_entries' : random,
'site' : site,
}
########NEW FILE########
__FILENAME__ = models
from django.db import models
# Create your models here.
########NEW FILE########
__FILENAME__ = urls
from django.conf.urls.defaults import *
from lifeflow.models import *
urlpatterns = patterns('lifeflow.editor.views',
(r'^$', 'overview'),
(r'^comments/$', 'comments'),
(r'^files/$', 'files'),
(r'^admin/blogroll/$', 'blogroll'),
(r'^admin/sites_to_ping/$', 'sites_to_notify'),
(r'^admin/site_config/$', 'site_config'),
(r'^authors/$', 'authors'),
(r'^authors/create/$', 'create_author'),
(r'^authors/(?P<id>\d+)/$', 'author_edit'),
(r'^authors/(?P<id>\d+)/create/$', 'create_author'),
(r'^projects/$', 'projects'),
(r'^projects/create/$', 'create_project'),
(r'^projects/(?P<id>\d+)/details/$','project_details'),
(r'^projects/(?P<id>\d+)/body/$','project_body'),
(r'^login/$', 'login'),
(r'^logout/$', 'logout'),
(r'^create/$', 'create'),
(r'^update/$', 'update'),
(r'^display_resource/(?P<id>\d+)/$', 'display_resource'),
(r'^add_resource/', 'add_resource'),
(r'^display_author/(?P<id>\d+)/$', 'display_author'),
(r'^add_author_picture/', 'add_author_picture'),
(r'^create_model/', 'create_model'),
(r'^delete_model/', 'delete_model'),
(r'^edit/(?P<category>\w+)/(?P<id>\d+)/title/$', 'article_title'),
(r'^edit/(?P<category>\w+)/(?P<id>\d+)/body/$', 'article_body'),
(r'^edit/(?P<category>\w+)/(?P<id>\d+)/flows/$', 'article_flows'),
(r'^edit/(?P<category>\w+)/(?P<id>\d+)/tags/$', 'article_tags'),
(r'^edit/(?P<category>\w+)/(?P<id>\d+)/series/$', 'article_series'),
(r'^edit/(?P<category>\w+)/(?P<id>\d+)/options/$', 'article_options'),
(r'^edit/(?P<category>\w+)/(?P<id>\d+)/authors/$', 'article_authors'),
(r'^rough_to_edited/(?P<id>\d+)/$', 'rough_to_edited'),
(r'^edited_to_rough/(?P<id>\d+)/$', 'edited_to_rough'),
(r'^edited_to_published/(?P<id>\d+)/$', 'edited_to_published'),
(r'^published_to_edited/(?P<id>\d+)/$', 'published_to_edited'),
(r'^render/(?P<model>\w+)/(?P<id>\d+)/$', 'render'),
(r'^render/$', 'render'),
)
########NEW FILE########
__FILENAME__ = views
"""
TODO
- write function to check a Draft for missing requirements before
transformation into an Entry, and report that data when the
transformation fails, instead of just "It failed" msg
- File upload functionality
- Setting datetime
- display list of files in zipfile resources
- display code for code resources
"""
import datetime, os.path, time, re
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponseRedirect, HttpResponseServerError
from lifeflow.models import *
from lifeflow.text_filters import entry_markup
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.contrib.auth import logout as auth_logout
from django.contrib.auth import login as auth_login
from django.contrib.auth import views, authenticate
from django.core.paginator import QuerySetPaginator
from django.contrib.sites.models import Site
from pygments import highlight
from pygments.util import ClassNotFound
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_for_filename
from zipfile import ZipFile
CHARACTERS_TO_STRIP = re.compile(r"[ \.,\!\?'\";:/\\+=#]+")
def sluggify(str):
return CHARACTERS_TO_STRIP.subn(u"-", str.lower())[0].strip("-")
def login(request):
error_msg = u""
if request.method == "POST":
POST = request.POST.copy()
username = POST.get('username',"")
password = POST.get('password',"")
if username == "" or password == "":
error_msg = u"Your username AND password, si vous plait."
else:
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
auth_login(request, user)
return HttpResponseRedirect("/editor/")
else:
error_msg = u"It works better when the username and password match."
return render_to_response("lifeflow/editor/login.html",
{"login_screen":True,
'error_message':error_msg})
def logout(request):
auth_logout(request)
return HttpResponseRedirect("/")
@login_required
def overview(request):
rough = Draft.objects.filter(edited=False)
edited = Draft.objects.filter(edited=True)
published = Entry.objects.all()
return render_to_response('lifeflow/editor/overview.html',
{'rough':rough,
'edited':edited,
'published':published},
RequestContext(request, {}))
@login_required
def comments(request):
try:
page = int(request.GET["page"])
except:
page = 1
page = QuerySetPaginator(Comment.objects.all(), 5).page(page)
return render_to_response('lifeflow/editor/comments.html',
{'page':page},
RequestContext(request,{}))
@login_required
def blogroll(request):
blogroll = RecommendedSite.objects.all()
return render_to_response('lifeflow/editor/blogroll.html',
{'blogroll':blogroll},
RequestContext(request,{}))
@login_required
def sites_to_notify(request):
sites = SiteToNotify.objects.all()
return render_to_response('lifeflow/editor/sites_to_ping.html',
{'sites_to_notify':sites},
RequestContext(request,{}))
@login_required
def site_config(request):
site = Site.objects.get_current()
return render_to_response('lifeflow/editor/site.html',
{'sites_to_notify':site},
RequestContext(request,{}))
@login_required
def files(request):
resources = Resource.objects.all()
return render_to_response('lifeflow/editor/files.html',
{'resources':resources},
RequestContext(request,{}))
@login_required
def projects(request):
projects = Project.objects.all()
return render_to_response('lifeflow/editor/projects.html',
{'projects':projects},
RequestContext(request,{}))
@login_required
def project_details(request, id):
project = Project.objects.get(pk=id)
return render_to_response('lifeflow/editor/project_details.html',
{'object':project},
RequestContext(request,{}))
@login_required
def project_body(request, id):
project = Project.objects.get(pk=id)
resources = Resource.objects.all()
return render_to_response('lifeflow/editor/project_body.html',
{'object':project,
'resources':resources},
RequestContext(request,{}))
@login_required
def authors(request):
authors = Author.objects.all()
selected = len(authors)-1
author = authors[selected]
return render_to_response('lifeflow/editor/authors.html',
{'author':author,
'authors':authors,
'selected':selected},
RequestContext(request,{}))
@login_required
def author_edit(request,id):
author = Author.objects.get(pk=id)
return render_to_response('lifeflow/editor/author.html',
{'author':author},
RequestContext(request,{}))
BOOLEAN_FIELDS = ["send_ping", "allow_comments", "use_markdown"]
MANY_TO_MANY_FIELDS = ["flows", "tags", "series", "authors"]
SLUG_FIELDS = ["slug"]
DATETIME_FIELDS = ["pub_date"]
@login_required
def update(request):
dict = request.POST.copy()
id = dict.pop('pk')[0]
model = dict.pop('model')[0]
object = get_class(model).objects.get(pk=id)
obj_dict = object.__dict__
for key in dict.keys():
if obj_dict.has_key(key):
val = dict[key]
if key in BOOLEAN_FIELDS:
if val == u"true":
val = True
elif val == u"false":
val = False
elif key in SLUG_FIELDS:
val = sluggify(val)
elif key in DATETIME_FIELDS:
t = time.mktime(time.strptime(val, "%Y-%m-%d %H:%M:%S"))
val = datetime.datetime.fromtimestamp(t)
obj_dict[key] = val
elif key in MANY_TO_MANY_FIELDS:
vals = dict.getlist(key)
manager = getattr(object, key)
manager.clear()
if not (len(vals) == 1 and int(vals[0]) == -1):
manager.add(*vals)
object.save()
return HttpResponse("success")
API_CLASSES = {"comment":Comment, "project":Project, "flow":Flow, "tag":Tag, "series":Series, "draft":Draft, "entry":Entry, "author":Author, "resource":Resource, "recommendedsite":RecommendedSite,'site_to_notify':SiteToNotify,'site':Site,"language":Language}
def get_class(str):
return API_CLASSES[str]
@login_required
def delete_model(request):
cls = get_class(request.POST['model'])
pk = request.POST['pk']
try:
cls.objects.get(pk=pk).delete()
return HttpResponse("success")
except:
return HttpResponseServerError(u"fail")
@login_required
def create_model(request):
def unique(slug, model):
if model.objects.filter(slug=slug).count() == 0:
return True
return False
toReturn = HttpResponseRedirect(request.META['HTTP_REFERER'])
model = request.POST['model']
if model in [u"flow", u"tag", u"series"]:
cls = get_class(model)
title = request.POST[u'title']
slug = sluggify(title)
if unique(slug, cls):
f = cls(title=title, slug=slug)
f.save()
elif model == u"language":
title = request.POST[u'title']
slug = sluggify(title)
l = Language(title=title,slug=slug)
l.save()
elif model == u"site_to_notify":
title = request.POST[u'title']
url_to_ping = request.POST[u'url_to_ping']
blog_title = request.POST[u'blog_title']
blog_url = request.POST[u'blog_url']
s = SiteToNotify(title=title,url_to_ping=url_to_ping,blog_title=blog_title,blog_url=blog_url)
s.save()
elif model == u"translation":
translated_pk = int(request.POST[u'pk'])
translated = Entry.objects.get(pk=translated_pk)
original_pk = int(request.POST[u'original'])
language_pk = int(request.POST[u'language'])
if original_pk == -1 or language_pk == -1:
[ x.delete() for x in Translation.objects.filter(original=translated)]
[ x.delete() for x in Translation.objects.filter(translated=translated)]
else:
original = Entry.objects.get(pk=original_pk)
language = Language.objects.get(pk=language_pk)
t = Translation(language=language,original=original,translated=translated)
t.save()
# update toReturn to return rendered template of translations
translations = Translation.objects.filter(translated=translated)
toReturn = render_to_response('lifeflow/editor/translations.html',
{'translations':translations},
RequestContext(request, {}))
elif model == u"recommendedsite":
title = request.POST[u'title']
url = request.POST[u'url']
f = RecommendedSite(title=title, url=url)
f.save()
return toReturn
@login_required
def add_author_picture(request):
id = request.POST['pk']
file = request.FILES['file']
filename = file.name
filebase = '%s/lifeflow/author/' % settings.MEDIA_ROOT
filepath = "%s%s" % (filebase, filename)
while (os.path.isfile(filepath)):
filename = "_%s" % filename
filepath = "%s%s" % (filebase, filename)
fd = open(filepath, 'wb')
fd.write(file.read())
fd.close()
author = Author.objects.get(pk=id)
author.picture = "lifeflow/author/%s" % filename
author.save()
return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def display_author(request, id):
pass
@login_required
def add_resource(request):
file = request.FILES['file']
title = request.POST['title']
markdown_id = request.POST['markdown_id']
filename = file.name
filebase = '%s/lifeflow/resource/' % settings.MEDIA_ROOT
filepath = "%s%s" % (filebase, filename)
while (os.path.isfile(filepath)):
filename = "_%s" % filename
filepath = "%s%s" % (filebase, filename)
fd = open(filepath, 'wb')
fd.write(file.read())
fd.close()
rec = Resource(title=title, markdown_id=markdown_id, content="lifeflow/resource/%s" % filename)
rec.save()
id = request.POST['pk']
model = request.POST['model']
return HttpResponseRedirect(request.META['HTTP_REFERER'])
#return HttpResponseRedirect("/editor/edit/%s/%s/2/" % (model, id))
IMAGE_EXTS = ["jpg", "jpeg", "png", "gif"]
ZIP_EXTS = ["zip"]
CODE_EXTS = ["css", "html", "htm", "c", "o", "py", "lisp", "js", "xml",
"java", "rb"]
@login_required
def display_resource(request, id):
res = Resource.objects.get(pk=id)
file = res.content.path.split("/")[-1]
opts = {'object':res,'file':file}
ext = opts['file'].split(".")[-1]
opts['type'] = 'file'
if ext in IMAGE_EXTS:
opts['type'] = "image"
elif ext in ZIP_EXTS:
try:
opts['type'] = "zip"
zf = ZipFile(res.content.path,'r')
opts['files_list'] = zf.namelist()
zf.close()
except IOError:
opts['type'] = "file"
else:
try:
lexer = get_lexer_for_filename(file)
f = open(res.content.path,'r')
data = f.read()
f.close()
opts['highlighted_code'] = highlight(data,lexer,HtmlFormatter())
opts['type'] = "code"
except ClassNotFound:
opts['type'] = "file"
except IOError:
opts['type'] = "file"
return render_to_response('lifeflow/editor/resource.html',opts,RequestContext(request, {}))
@login_required
def article_title(request, category, id):
if category == "entry":
obj = Entry.objects.get(pk=id)
else:
obj = Draft.objects.get(pk=id)
return render_to_response('lifeflow/editor/article_title.html',
{'object':obj,
'model':category},
RequestContext(request, {}))
@login_required
def article_body(request, category, id):
resources = Resource.objects.all()
if category == "entry":
obj = Entry.objects.get(pk=id)
else:
obj = Draft.objects.get(pk=id)
return render_to_response('lifeflow/editor/article_body.html',
{'object':obj,
'resources':resources,
'model':category},
RequestContext(request, {}))
@login_required
def article_flows(request, category, id):
if category == "entry":
obj = Entry.objects.get(pk=id)
else:
obj = Draft.objects.get(pk=id)
obj_flows = obj.flows.all()
flows = [ (x, x in obj_flows) for x in Flow.objects.all()]
return render_to_response('lifeflow/editor/article_flows.html',
{'object':obj,
'flows':flows,
'model':category},
RequestContext(request, {}))
@login_required
def article_tags(request, category, id):
if category == "entry":
obj = Entry.objects.get(pk=id)
else:
obj = Draft.objects.get(pk=id)
obj_tags = obj.tags.all()
tags = [ (x, x in obj_tags) for x in Tag.objects.all()]
return render_to_response('lifeflow/editor/article_tags.html',
{'object':obj,
'tags':tags,
'model':category},
RequestContext(request, {}))
@login_required
def article_series(request, category, id):
if category == "entry":
obj = Entry.objects.get(pk=id)
else:
obj = Draft.objects.get(pk=id)
obj_series = obj.series.all()
series = [ (x, x in obj_series) for x in Series.objects.all()]
return render_to_response('lifeflow/editor/article_series.html',
{'object':obj,
'series':series,
'model':category},
RequestContext(request, {}))
@login_required
def article_options(request, category, id):
if category == "entry":
obj = Entry.objects.get(pk=id)
else:
obj = Draft.objects.get(pk=id)
return render_to_response('lifeflow/editor/article_options.html',
{'object':obj,
'model':category},
RequestContext(request, {}))
@login_required
def article_authors(request, category, id):
if category == "entry":
obj = Entry.objects.get(pk=id)
else:
obj = Draft.objects.get(pk=id)
obj_authors = obj.authors.all()
authors = [ (x, x in obj_authors) for x in Author.objects.all()]
langs = Language.objects.all()
entries = Entry.objects.all()
translations = Translation.objects.filter(translated=obj)
return render_to_response('lifeflow/editor/article_authors.html',
{'object':obj,
'authors':authors,
'langs':langs,
'entries':entries,
'translations':translations,
'model':category},
RequestContext(request, {}))
@login_required
def rough_to_edited(request, id):
try:
obj = Draft.objects.get(pk=id)
obj.edited = True
obj.save()
return HttpResponse(u"%s" % obj.pk)
except:
return HttpResponseServerError(u"Failed.")
@login_required
def edited_to_rough(request, id):
try:
obj = Draft.objects.get(pk=id)
obj.edited = False
obj.save()
return HttpResponse(u"%s" % obj.pk)
except:
return HttpResponseServerError(u"Failed.")
@login_required
def edited_to_published(request, id):
def check(dict):
complaints = []
if dict[u"title"] in [None, u""]:
complaints.append("You need to give the entry a title first.")
if dict[u"body"] in [None, u""]:
complaints.append("You'll need to fill out the article a bit before publishing it.")
if complaints == []:
return True
else:
return "\n<br>\n".join(complaints)
def transform(draft):
dict = draft.__dict__.copy()
del dict['id']
if dict['pub_date'] is None:
dict['pub_date'] = datetime.datetime.now()
del dict['edited']
if dict['slug'] is None and dict['title'] is not None:
dict['slug'] = sluggify(dict['title'])
entry = Entry(**dict)
valid = check(entry.__dict__)
if valid != True:
return None, valid
else:
entry.save()
for field in MANY_TO_MANY_FIELDS:
getattr(entry, field).add(*getattr(draft, field).all())
return entry, True
try:
draft = Draft.objects.get(pk=id)
entry, result = transform(draft)
if result == True:
draft.delete()
return HttpResponse(u"%s" % entry.pk)
else:
return HttpResponseServerError(result)
except TypeError:
return HttpResponseServerError(u"The draft is missing required fields.")
except:
return HttpResponseServerError(u"The update made it to the server, but failed for unknown reasons.")
@login_required
def published_to_edited(request, id):
def transform(entry):
dict = entry.__dict__.copy()
dict['edited'] = True
del dict['body_html']
del dict['id']
draft = Draft(**dict)
draft.save()
for field in MANY_TO_MANY_FIELDS:
getattr(draft, field).add(*getattr(entry, field).all())
return draft
try:
entry = Entry.objects.get(pk=id)
draft = transform(entry)
entry.delete()
return HttpResponse(u"%s" % draft.pk)
except:
return HttpResponseServerError(u"Update failed.")
@login_required
def create(request):
obj = Draft()
obj.save()
return HttpResponseRedirect("../edit/draft/%s/title/" % obj.pk)
@login_required
def create_project(request):
obj = Project()
obj.save()
return HttpResponseRedirect("/editor/projects/%s/details/" % obj.pk)
@login_required
def create_author(request,id=None):
obj = Author()
obj.save()
return HttpResponseRedirect("/editor/authors/")
@login_required
def render(request, model=None, id=None):
if id is None and request.POST.has_key('pk'):
id = request.POST['pk']
if id is None:
txt = entry_markup(request.POST['txt'])
else:
if model == u"draft":
obj = Draft.objects.get(pk=id)
elif model ==u"entry":
obj = Entry.objects.get(pk=id)
elif model == u"project":
obj = Project.objects.get(pk=id)
if obj.use_markdown:
txt = entry_markup(obj.body, obj)
else:
txt = obj.body
return HttpResponse(txt)
########NEW FILE########
__FILENAME__ = feeds
from django.contrib.syndication.feeds import Feed
from django.conf import settings
from lifeflow.models import *
class AllFeed(Feed):
title = u"%s" % settings.LIFEFLOW_BLOG_NAME
link = u"/"
description = u"The full feed of all entries! Piping hot and ready for consumption."
copyright = u'Creative Commons License'
def items(self):
return Entry.current.all().order_by('-pub_date')[:25]
def item_pubdate(self, item):
return item.pub_date
class FlowFeed(Feed):
def get_object(self, bits):
slug = bits[0]
return Flow.objects.get(slug=slug)
def title(self, obj):
return u"%s: %s" % (settings.LIFEFLOW_BLOG_NAME,
obj.title)
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return u"The piping hot feed for all entries in the %s flow." % obj.title
def items(self, obj):
return obj.latest(qty=25)
def item_pubdate(self, item):
return item.pub_date
class TagFeed(Feed):
def get_object(self, bits):
slug = bits[0]
return Tag.objects.get(slug=slug)
def title(self, obj):
return u"%s: the %s tag" % (settings.LIFEFLOW_BLOG_NAME,
obj.title)
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return u"All entries tagged with %s." % obj.title
def items(self, obj):
return obj.latest()
def item_pubdate(self, item):
return item.pub_date
class AuthorFeed(Feed):
def get_object(self, bits):
slug = bits[0]
return Author.objects.get(slug=slug)
    def title(self, obj):
        return u"Feed for stuff by %s." % obj.name
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return u"Recent entries written by %s." % obj.name
def items(self, obj):
return obj.latest()
def item_pubdate(self, item):
return item.pub_date
class LanguageFeed(Feed):
def get_object(self, bits):
slug = bits[0]
return Language.objects.get(slug=slug)
    def title(self, obj):
        return u"Feed for stuff translated into %s." % obj.title
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return u"Recent entries translated into %s." % obj.title
def items(self, obj):
return obj.latest()
def item_pubdate(self, item):
return item.pub_date
class SeriesFeed(Feed):
def get_object(self, bits):
slug = bits[0]
return Series.objects.get(slug=slug)
def title(self, obj):
return u"%s: %s" % (settings.LIFEFLOW_BLOG_NAME,
obj.title)
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return u"Entries in the %s series." % obj.title
def items(self, obj):
return obj.latest()
def item_pubdate(self, item):
return item.pub_date
class TranslationFeed(Feed):
title = u"%s: Translations" % settings.LIFEFLOW_BLOG_NAME
link = u"/"
description = u"Recent translationed entries."
copyright = u'Creative Commons License'
def items(self):
return Entry.objects.all().filter(**{'pub_date__lte': datetime.datetime.now()}).filter(**{'is_translation':True})
def item_pubdate(self, item):
return item.pub_date
class ProjectFeed(Feed):
title = u"%s: Projects" % settings.LIFEFLOW_BLOG_NAME
link = u"/"
description = u"Latest projects on %s." % settings.LIFEFLOW_BLOG_NAME
copyright = u'Creative Commons License'
def items(self):
return Project.objects.all().order_by('-id')
class CommentFeed(Feed):
title = u"%s: Comments" % settings.LIFEFLOW_BLOG_NAME
link = "/"
description = u"Latest comments on %s." % settings.LIFEFLOW_BLOG_NAME
copyright = u'Creative Commons License'
def items(self):
return Comment.objects.all().order_by('-date',)[:20]
def item_pubdate(self, item):
return item.date
class EntryCommentFeed(Feed):
def get_object(self, bits):
year = bits[0]
month = bits[1]
day = bits[2]
slug = bits[3]
return Entry.objects.get(pub_date__year=year,
pub_date__month=month,
pub_date__day=day,
slug=slug)
def title(self, obj):
return u"%s: Comments for %s" % (settings.LIFEFLOW_BLOG_NAME,
obj.title)
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return u"Comments for %s." % obj.title
def items(self, obj):
return obj.comment_set.all().order_by('-date')
def item_pubdate(self, item):
return item.date
########NEW FILE########
__FILENAME__ = forms
import cgi
from django import forms
from lifeflow.text_filters import comment_markup
class CommentForm(forms.Form):
name = forms.CharField(required=False)
email = forms.CharField(required=False)
webpage = forms.CharField(required=False)
body = forms.CharField(widget=forms.Textarea, required=False)
def clean_name(self):
name = self.cleaned_data['name']
if name == u"":
name = u"name"
else:
name = cgi.escape(name)
return name
def clean_email(self):
email = self.cleaned_data['email']
if email == u"":
email = u"email"
else:
email = cgi.escape(email)
return email
def clean_webpage(self):
webpage = self.cleaned_data['webpage']
if webpage == u"":
webpage = u"webpage"
else:
webpage = cgi.escape(webpage)
if webpage.find('://') == -1: webpage = "http://%s" % webpage
return webpage
def clean_body(self):
body = self.cleaned_data['body']
self.cleaned_data['html'] = unicode(comment_markup(body))
return body
########NEW FILE########
__FILENAME__ = markdown
#!/usr/bin/env python
version = "1.7"
version_info = (1,7,0,"rc-1")
__revision__ = "$Rev: 66 $"
"""
Python-Markdown
===============
Converts Markdown to HTML. Basic usage as a module:
import markdown
md = Markdown()
html = md.convert(your_text_string)
See http://www.freewisdom.org/projects/python-markdown/ for more
information and instructions on how to extend the functionality of the
script. (You might want to read that before you try modifying this
file.)
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org) and [Waylan
Limberg](http://achinghead.com/).
Contact: yuri [at] freewisdom.org
waylan [at] gmail.com
License: GPL 2 (http://www.gnu.org/copyleft/gpl.html) or BSD
"""
import re, sys, os, random, codecs
from logging import getLogger, StreamHandler, Formatter, \
DEBUG, INFO, WARN, ERROR, CRITICAL
MESSAGE_THRESHOLD = CRITICAL
# Configure debug message logger (the hard way - to support python 2.3)
logger = getLogger('MARKDOWN')
logger.setLevel(DEBUG) # This is restricted by handlers later
console_hndlr = StreamHandler()
formatter = Formatter('%(name)s-%(levelname)s: "%(message)s"')
console_hndlr.setFormatter(formatter)
console_hndlr.setLevel(MESSAGE_THRESHOLD)
logger.addHandler(console_hndlr)
def message(level, text):
''' A wrapper method for logging debug messages. '''
logger.log(level, text)
# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY -----------------
TAB_LENGTH = 4 # expand tabs to this many spaces
ENABLE_ATTRIBUTES = True # @id = xyz -> <... id="xyz">
SMART_EMPHASIS = 1 # this_or_that does not become this<i>or</i>that
HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode
RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
# from Hebrew to Nko (includes Arabic, Syriac and Thaana)
(u'\u2D30', u'\u2D7F'),
# Tifinagh
)
# Unicode Reference Table:
# 0590-05FF - Hebrew
# 0600-06FF - Arabic
# 0700-074F - Syriac
# 0750-077F - Arabic Supplement
# 0780-07BF - Thaana
# 07C0-07FF - Nko
BOMS = { 'utf-8': (codecs.BOM_UTF8, ),
'utf-16': (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE),
#'utf-32': (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)
}
def removeBOM(text, encoding):
convert = isinstance(text, unicode)
for bom in BOMS[encoding]:
bom = convert and bom.decode(encoding) or bom
if text.startswith(bom):
return text.lstrip(bom)
return text
# The following constant specifies the name used in the usage
# statement displayed for python versions lower than 2.3. (With
# python2.3 and higher the usage statement is generated by optparse
# and uses the actual name of the executable called.)
EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"
# --------------- CONSTANTS YOU _SHOULD NOT_ HAVE TO CHANGE ----------
# a template for html placeholders
HTML_PLACEHOLDER_PREFIX = "qaodmasdkwaspemas"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%dajkqlsmdqpakldnzsdfls"
BLOCK_LEVEL_ELEMENTS = ['p', 'div', 'blockquote', 'pre', 'table',
'dl', 'ol', 'ul', 'script', 'noscript',
'form', 'fieldset', 'iframe', 'math', 'ins',
'del', 'hr', 'hr/', 'style']
def is_block_level (tag):
return ( (tag in BLOCK_LEVEL_ELEMENTS) or
(tag[0] == 'h' and tag[1] in "0123456789") )
"""
======================================================================
========================== NANODOM ===================================
======================================================================
The three classes below implement some of the most basic DOM
methods. I use this instead of minidom because I need a simpler
functionality and do not want to require additional libraries.
Importantly, NanoDom does not do normalization, which is what we
want. It also adds extra white space when converting DOM to string
"""
ENTITY_NORMALIZATION_EXPRESSIONS = [ (re.compile("&"), "&amp;"),
                                     (re.compile("<"), "&lt;"),
                                     (re.compile(">"), "&gt;"),
                                     (re.compile("\""), "&quot;")]
ENTITY_NORMALIZATION_EXPRESSIONS_SOFT = [ (re.compile("&(?!\#)"), "&amp;"),
                                     (re.compile("<"), "&lt;"),
                                     (re.compile(">"), "&gt;"),
                                     (re.compile("\""), "&quot;")]
def getBidiType(text):
if not text: return None
ch = text[0]
if not isinstance(ch, unicode) or not ch.isalpha():
return None
else:
for min, max in RTL_BIDI_RANGES:
if ( ch >= min and ch <= max ):
return "rtl"
else:
return "ltr"
class Document:
def __init__ (self):
self.bidi = "ltr"
def appendChild(self, child):
self.documentElement = child
child.isDocumentElement = True
child.parent = self
self.entities = {}
def setBidi(self, bidi):
if bidi:
self.bidi = bidi
def createElement(self, tag, textNode=None):
el = Element(tag)
el.doc = self
if textNode:
el.appendChild(self.createTextNode(textNode))
return el
def createTextNode(self, text):
node = TextNode(text)
node.doc = self
return node
def createEntityReference(self, entity):
if entity not in self.entities:
self.entities[entity] = EntityReference(entity)
return self.entities[entity]
def createCDATA(self, text):
node = CDATA(text)
node.doc = self
return node
def toxml (self):
return self.documentElement.toxml()
def normalizeEntities(self, text, avoidDoubleNormalizing=False):
if avoidDoubleNormalizing:
regexps = ENTITY_NORMALIZATION_EXPRESSIONS_SOFT
else:
regexps = ENTITY_NORMALIZATION_EXPRESSIONS
for regexp, substitution in regexps:
text = regexp.sub(substitution, text)
return text
def find(self, test):
return self.documentElement.find(test)
def unlink(self):
self.documentElement.unlink()
self.documentElement = None
class CDATA:
type = "cdata"
def __init__ (self, text):
self.text = text
def handleAttributes(self):
pass
def toxml (self):
return "<![CDATA[" + self.text + "]]>"
class Element:
type = "element"
def __init__ (self, tag):
self.nodeName = tag
self.attributes = []
self.attribute_values = {}
self.childNodes = []
self.bidi = None
self.isDocumentElement = False
def setBidi(self, bidi):
if bidi:
orig_bidi = self.bidi
if not self.bidi or self.isDocumentElement:
# Once the bidi is set don't change it (except for doc element)
self.bidi = bidi
self.parent.setBidi(bidi)
def unlink(self):
for child in self.childNodes:
if child.type == "element":
child.unlink()
self.childNodes = None
def setAttribute(self, attr, value):
if not attr in self.attributes:
self.attributes.append(attr)
self.attribute_values[attr] = value
def insertChild(self, position, child):
self.childNodes.insert(position, child)
child.parent = self
def removeChild(self, child):
self.childNodes.remove(child)
def replaceChild(self, oldChild, newChild):
position = self.childNodes.index(oldChild)
self.removeChild(oldChild)
self.insertChild(position, newChild)
def appendChild(self, child):
self.childNodes.append(child)
child.parent = self
def handleAttributes(self):
pass
def find(self, test, depth=0):
""" Returns a list of descendants that pass the test function """
matched_nodes = []
for child in self.childNodes:
if test(child):
matched_nodes.append(child)
if child.type == "element":
matched_nodes += child.find(test, depth+1)
return matched_nodes
def toxml(self):
if ENABLE_ATTRIBUTES:
for child in self.childNodes:
child.handleAttributes()
buffer = ""
if self.nodeName in ['h1', 'h2', 'h3', 'h4']:
buffer += "\n"
elif self.nodeName in ['li']:
buffer += "\n "
# Process children FIRST, then do the attributes
childBuffer = ""
if self.childNodes or self.nodeName in ['blockquote']:
childBuffer += ">"
for child in self.childNodes:
childBuffer += child.toxml()
if self.nodeName == 'p':
childBuffer += "\n"
elif self.nodeName == 'li':
childBuffer += "\n "
childBuffer += "</%s>" % self.nodeName
else:
childBuffer += "/>"
buffer += "<" + self.nodeName
if self.nodeName in ['p', 'li', 'ul', 'ol',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
if not self.attribute_values.has_key("dir"):
if self.bidi:
bidi = self.bidi
else:
bidi = self.doc.bidi
if bidi=="rtl":
self.setAttribute("dir", "rtl")
for attr in self.attributes:
value = self.attribute_values[attr]
value = self.doc.normalizeEntities(value,
avoidDoubleNormalizing=True)
buffer += ' %s="%s"' % (attr, value)
# Now let's actually append the children
buffer += childBuffer
if self.nodeName in ['p', 'br ', 'li', 'ul', 'ol',
'h1', 'h2', 'h3', 'h4'] :
buffer += "\n"
return buffer
class TextNode:
type = "text"
attrRegExp = re.compile(r'\{@([^\}]*)=([^\}]*)}') # {@id=123}
def __init__ (self, text):
self.value = text
def attributeCallback(self, match):
self.parent.setAttribute(match.group(1), match.group(2))
def handleAttributes(self):
self.value = self.attrRegExp.sub(self.attributeCallback, self.value)
def toxml(self):
text = self.value
self.parent.setBidi(getBidiType(text))
if not text.startswith(HTML_PLACEHOLDER_PREFIX):
if self.parent.nodeName == "p":
text = text.replace("\n", "\n ")
elif (self.parent.nodeName == "li"
and self.parent.childNodes[0]==self):
text = "\n " + text.replace("\n", "\n ")
text = self.doc.normalizeEntities(text)
return text
class EntityReference:
type = "entity_ref"
def __init__(self, entity):
self.entity = entity
def handleAttributes(self):
pass
def toxml(self):
return "&" + self.entity + ";"
"""
======================================================================
========================== PRE-PROCESSORS ============================
======================================================================
Preprocessors munge source text before we start doing anything too
complicated.
Each preprocessor implements a "run" method that takes a pointer to a
list of lines of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new list. Preprocessors
must extend markdown.Preprocessor.
"""
class Preprocessor:
pass
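# Illustrative example (not part of the original module): preprocessors
# subclass Preprocessor and implement run(lines), returning the modified
# list of source lines.
class _ExampleStripTrailingWhitespacePreprocessor (Preprocessor):
    def run (self, lines):
        return [line.rstrip() for line in lines]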
class HeaderPreprocessor (Preprocessor):
"""
Replaces underlined headers with hashed headers to avoid
the nead for lookahead later.
"""
def run (self, lines):
i = -1
while i+1 < len(lines):
i = i+1
if not lines[i].strip():
continue
if lines[i].startswith("#"):
lines.insert(i+1, "\n")
if (i+1 <= len(lines)
and lines[i+1]
and lines[i+1][0] in ['-', '=']):
underline = lines[i+1].strip()
if underline == "="*len(underline):
lines[i] = "# " + lines[i].strip()
lines[i+1] = ""
elif underline == "-"*len(underline):
lines[i] = "## " + lines[i].strip()
lines[i+1] = ""
return lines
HEADER_PREPROCESSOR = HeaderPreprocessor()
class LinePreprocessor (Preprocessor):
"""Deals with HR lines (needs to be done before processing lists)"""
blockquote_re = re.compile(r'^(> )+')
def run (self, lines):
for i in range(len(lines)):
prefix = ''
m = self.blockquote_re.search(lines[i])
if m : prefix = m.group(0)
if self._isLine(lines[i][len(prefix):]):
lines[i] = prefix + self.stash.store("<hr />", safe=True)
return lines
def _isLine(self, block):
"""Determines if a block should be replaced with an <:wHR>"""
if block.startswith(" "): return 0 # a code block
text = "".join([x for x in block if not x.isspace()])
if len(text) <= 2:
return 0
for pattern in ['isline1', 'isline2', 'isline3']:
m = RE.regExp[pattern].match(text)
if (m and m.group(1)):
return 1
        else: # for/else: reached only when no line pattern matched
return 0
LINE_PREPROCESSOR = LinePreprocessor()
class HtmlBlockPreprocessor (Preprocessor):
"""Removes html blocks from self.lines"""
def _get_left_tag(self, block):
return block[1:].replace(">", " ", 1).split()[0].lower()
def _get_right_tag(self, left_tag, block):
return block.rstrip()[-len(left_tag)-2:-1].lower()
def _equal_tags(self, left_tag, right_tag):
if left_tag in ['?', '?php', 'div']: # handle PHP, etc.
return True
if ("/" + left_tag) == right_tag:
return True
if (right_tag == "--" and left_tag == "--"):
return True
elif left_tag == right_tag[1:] \
and right_tag[0] != "<":
return True
else:
return False
def _is_oneliner(self, tag):
return (tag in ['hr', 'hr/'])
def run (self, text):
new_blocks = []
text = text.split("\n\n")
items = []
left_tag = ''
right_tag = ''
in_tag = False # flag
for block in text:
if block.startswith("\n"):
block = block[1:]
if not in_tag:
if block.startswith("<"):
left_tag = self._get_left_tag(block)
right_tag = self._get_right_tag(left_tag, block)
if not (is_block_level(left_tag) \
or block[1] in ["!", "?", "@", "%"]):
new_blocks.append(block)
continue
if self._is_oneliner(left_tag):
new_blocks.append(block.strip())
continue
if block[1] == "!":
# is a comment block
left_tag = "--"
right_tag = self._get_right_tag(left_tag, block)
# keep checking conditions below and maybe just append
if block.rstrip().endswith(">") \
and self._equal_tags(left_tag, right_tag):
new_blocks.append(
self.stash.store(block.strip()))
continue
else: #if not block[1] == "!":
# if is block level tag and is not complete
items.append(block.strip())
in_tag = True
continue
new_blocks.append(block)
else:
items.append(block.strip())
right_tag = self._get_right_tag(left_tag, block)
if self._equal_tags(left_tag, right_tag):
# if find closing tag
in_tag = False
new_blocks.append(
self.stash.store('\n\n'.join(items)))
items = []
if items:
new_blocks.append(self.stash.store('\n\n'.join(items)))
new_blocks.append('\n')
return "\n\n".join(new_blocks)
HTML_BLOCK_PREPROCESSOR = HtmlBlockPreprocessor()
class ReferencePreprocessor (Preprocessor):
def run (self, lines):
new_text = [];
for line in lines:
m = RE.regExp['reference-def'].match(line)
if m:
id = m.group(2).strip().lower()
t = m.group(4).strip() # potential title
if not t:
self.references[id] = (m.group(3), t)
elif (len(t) >= 2
and (t[0] == t[-1] == "\""
or t[0] == t[-1] == "\'"
or (t[0] == "(" and t[-1] == ")") ) ):
self.references[id] = (m.group(3), t[1:-1])
else:
new_text.append(line)
else:
new_text.append(line)
return new_text #+ "\n"
REFERENCE_PREPROCESSOR = ReferencePreprocessor()
"""
======================================================================
========================== INLINE PATTERNS ===========================
======================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs support the following methods:
pattern.getCompiledRegExp() - returns a regular expression
pattern.handleMatch(m, doc) - takes a match object and returns
a NanoDom node (as a part of the provided
doc) or None
All of python markdown's built-in patterns subclass from Patter,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline must
capture the whole block. For this reason, they all start with
'^(.*)' and end with '(.*)!'. In case with built-in expression
Pattern takes care of adding the "^(.*)" and "(.*)!".
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'
BACKTICK_RE = r'\`([^\`]*)\`' # `e= m*c^2`
DOUBLE_BACKTICK_RE = r'\`\`(.*)\`\`' # ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \<
EMPHASIS_RE = r'\*([^\*]*)\*' # *emphasis*
STRONG_RE = r'\*\*(.*)\*\*' # **strong**
STRONG_EM_RE = r'\*\*\*([^_]*)\*\*\*' # ***strong***
if SMART_EMPHASIS:
EMPHASIS_2_RE = r'(?<!\S)_(\S[^_]*)_' # _emphasis_
else:
EMPHASIS_2_RE = r'_([^_]*)_' # _emphasis_
STRONG_2_RE = r'__([^_]*)__' # __strong__
STRONG_EM_2_RE = r'___([^_]*)___' # ___strong___
LINK_RE = NOIMG + BRK + r'\s*\(([^\)]*)\)' # [text](url)
LINK_ANGLED_RE = NOIMG + BRK + r'\s*\(<([^\)]*)>\)' # [text](<url>)
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\(([^\)]*)\)' # 
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'( \* )' # stand-alone * or _
AUTOLINK_RE = r'<(http://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <[email protected]>
#HTML_RE = r'(\<[^\>]*\>)' # <...>
HTML_RE = r'(\<[a-zA-Z/][^\>]*\>)' # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'               # &amp;
LINE_BREAK_RE = r' \n' # two spaces at end of line
LINE_BREAK_2_RE = r' $' # two spaces at end of text
class Pattern:
def __init__ (self, pattern):
self.pattern = pattern
self.compiled_re = re.compile("^(.*)%s(.*)$" % pattern, re.DOTALL)
def getCompiledRegExp (self):
return self.compiled_re
BasePattern = Pattern # for backward compatibility
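# Illustrative example (not part of the original module): a custom inline
# pattern that turns ~~text~~ into <del> tags. Pattern wraps the expression
# in "^(.*)" and "(.*)$", so the inner text is match group 2, and
# handleMatch() returns a NanoDom node built from the given doc.
class _ExampleDelPattern (Pattern):
    def handleMatch (self, m, doc):
        el = doc.createElement('del')
        el.appendChild(doc.createTextNode(m.group(2)))
        return el
_EXAMPLE_DEL_PATTERN = _ExampleDelPattern(r'\~\~([^~]*)\~\~')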
class SimpleTextPattern (Pattern):
def handleMatch(self, m, doc):
return doc.createTextNode(m.group(2))
class SimpleTagPattern (Pattern):
def __init__ (self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m, doc):
el = doc.createElement(self.tag)
el.appendChild(doc.createTextNode(m.group(2)))
return el
class SubstituteTagPattern (SimpleTagPattern):
def handleMatch (self, m, doc):
return doc.createElement(self.tag)
class BacktickPattern (Pattern):
def __init__ (self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m, doc):
el = doc.createElement(self.tag)
text = m.group(2).strip()
        #text = text.replace("&amp;", "&")
el.appendChild(doc.createTextNode(text))
return el
class DoubleTagPattern (SimpleTagPattern):
def handleMatch(self, m, doc):
tag1, tag2 = self.tag.split(",")
el1 = doc.createElement(tag1)
el2 = doc.createElement(tag2)
el1.appendChild(el2)
el2.appendChild(doc.createTextNode(m.group(2)))
return el1
class HtmlPattern (Pattern):
def handleMatch (self, m, doc):
rawhtml = m.group(2)
inline = True
place_holder = self.stash.store(rawhtml)
return doc.createTextNode(place_holder)
class LinkPattern (Pattern):
def handleMatch(self, m, doc):
el = doc.createElement('a')
el.appendChild(doc.createTextNode(m.group(2)))
parts = m.group(9).split('"')
# We should now have [], [href], or [href, title]
if parts:
el.setAttribute('href', parts[0].strip())
else:
el.setAttribute('href', "")
if len(parts) > 1:
# we also got a title
title = '"' + '"'.join(parts[1:]).strip()
            title = dequote(title) #.replace('"', "&quot;")
el.setAttribute('title', title)
return el
class ImagePattern (Pattern):
def handleMatch(self, m, doc):
el = doc.createElement('img')
src_parts = m.group(9).split()
if src_parts:
el.setAttribute('src', src_parts[0])
else:
el.setAttribute('src', "")
if len(src_parts) > 1:
el.setAttribute('title', dequote(" ".join(src_parts[1:])))
if ENABLE_ATTRIBUTES:
text = doc.createTextNode(m.group(2))
el.appendChild(text)
text.handleAttributes()
truealt = text.value
el.childNodes.remove(text)
else:
truealt = m.group(2)
el.setAttribute('alt', truealt)
return el
class ReferencePattern (Pattern):
def handleMatch(self, m, doc):
if m.group(9):
id = m.group(9).lower()
else:
# if we got something like "[Google][]"
# we'll use "google" as the id
id = m.group(2).lower()
if not self.references.has_key(id): # ignore undefined refs
return None
href, title = self.references[id]
text = m.group(2)
return self.makeTag(href, title, text, doc)
def makeTag(self, href, title, text, doc):
el = doc.createElement('a')
el.setAttribute('href', href)
if title:
el.setAttribute('title', title)
el.appendChild(doc.createTextNode(text))
return el
class ImageReferencePattern (ReferencePattern):
def makeTag(self, href, title, text, doc):
el = doc.createElement('img')
el.setAttribute('src', href)
if title:
el.setAttribute('title', title)
el.setAttribute('alt', text)
return el
class AutolinkPattern (Pattern):
def handleMatch(self, m, doc):
el = doc.createElement('a')
el.setAttribute('href', m.group(2))
el.appendChild(doc.createTextNode(m.group(2)))
return el
class AutomailPattern (Pattern):
def handleMatch(self, m, doc):
el = doc.createElement('a')
email = m.group(2)
if email.startswith("mailto:"):
email = email[len("mailto:"):]
for letter in email:
entity = doc.createEntityReference("#%d" % ord(letter))
el.appendChild(entity)
mailto = "mailto:" + email
mailto = "".join(['&#%d;' % ord(letter) for letter in mailto])
el.setAttribute('href', mailto)
return el
ESCAPE_PATTERN = SimpleTextPattern(ESCAPE_RE)
NOT_STRONG_PATTERN = SimpleTextPattern(NOT_STRONG_RE)
BACKTICK_PATTERN = BacktickPattern(BACKTICK_RE)
DOUBLE_BACKTICK_PATTERN = BacktickPattern(DOUBLE_BACKTICK_RE)
STRONG_PATTERN = SimpleTagPattern(STRONG_RE, 'strong')
STRONG_PATTERN_2 = SimpleTagPattern(STRONG_2_RE, 'strong')
EMPHASIS_PATTERN = SimpleTagPattern(EMPHASIS_RE, 'em')
EMPHASIS_PATTERN_2 = SimpleTagPattern(EMPHASIS_2_RE, 'em')
STRONG_EM_PATTERN = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
STRONG_EM_PATTERN_2 = DoubleTagPattern(STRONG_EM_2_RE, 'strong,em')
LINE_BREAK_PATTERN = SubstituteTagPattern(LINE_BREAK_RE, 'br ')
LINE_BREAK_PATTERN_2 = SubstituteTagPattern(LINE_BREAK_2_RE, 'br ')
LINK_PATTERN = LinkPattern(LINK_RE)
LINK_ANGLED_PATTERN = LinkPattern(LINK_ANGLED_RE)
IMAGE_LINK_PATTERN = ImagePattern(IMAGE_LINK_RE)
IMAGE_REFERENCE_PATTERN = ImageReferencePattern(IMAGE_REFERENCE_RE)
REFERENCE_PATTERN = ReferencePattern(REFERENCE_RE)
HTML_PATTERN = HtmlPattern(HTML_RE)
ENTITY_PATTERN = HtmlPattern(ENTITY_RE)
AUTOLINK_PATTERN = AutolinkPattern(AUTOLINK_RE)
AUTOMAIL_PATTERN = AutomailPattern(AUTOMAIL_RE)
"""
======================================================================
========================== POST-PROCESSORS ===========================
======================================================================
Markdown also allows post-processors, which are similar to
preprocessors in that they need to implement a "run" method. Unlike
pre-processors, they take a NanoDom document as a parameter and work
with that.
Post-Processor should extend markdown.Postprocessor.
There are currently no standard post-processors, but the footnote
extension below uses one.
"""
class Postprocessor:
pass
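# Illustrative sketch (not in the original source): a post-processor
# implements run(doc) and mutates the finished NanoDom document in place.
class _ExamplePostprocessor(Postprocessor):
    def run(self, doc):
        # e.g. tag the converted output so stylesheets can target it
        doc.documentElement.setAttribute('class', 'markdown-converted')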
"""
======================================================================
======================== TEXT-POST-PROCESSORS ========================
======================================================================
Markdown also allows text-post-processors, which are similar to
text preprocessors in that they need to implement a "run" method.
Unlike post-processors, they take a text string as a parameter and
should return a string.
Text-Post-Processors should extend markdown.Postprocessor.
"""
class RawHtmlTextPostprocessor(Postprocessor):
def __init__(self):
pass
def run(self, text):
for i in range(self.stash.html_counter):
html, safe = self.stash.rawHtmlBlocks[i]
if self.safeMode and not safe:
if str(self.safeMode).lower() == 'escape':
html = self.escape(html)
elif str(self.safeMode).lower() == 'remove':
html = ''
else:
html = HTML_REMOVED_TEXT
text = text.replace("<p>%s\n</p>" % (HTML_PLACEHOLDER % i),
html + "\n")
text = text.replace(HTML_PLACEHOLDER % i, html)
return text
    def escape(self, html):
        ''' Basic html escaping '''
        html = html.replace('&', '&amp;')
        html = html.replace('<', '&lt;')
        html = html.replace('>', '&gt;')
        return html.replace('"', '&quot;')
RAWHTMLTEXTPOSTPROCESSOR = RawHtmlTextPostprocessor()
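# Illustrative sketch (not in the original source): a text post-processor
# receives the serialized XHTML string and must return a string.
class _ExampleTextPostprocessor(Postprocessor):
    def run(self, text):
        return text.rstrip() + "\n" # e.g. normalize trailing whitespace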
"""
======================================================================
========================== MISC AUXILIARY CLASSES ====================
======================================================================
"""
class HtmlStash:
"""This class is used for stashing HTML objects that we extract
in the beginning and replace with place-holders."""
def __init__ (self):
self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks=[]
def store(self, html, safe=False):
"""Saves an HTML segment for later reinsertion. Returns a
placeholder string that needs to be inserted into the
document.
        @param html: an html segment
        @param safe: label an html segment as safe for safemode
        @returns : a placeholder string """
self.rawHtmlBlocks.append((html, safe))
placeholder = HTML_PLACEHOLDER % self.html_counter
self.html_counter += 1
return placeholder
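# Usage sketch (illustrative, never called): preprocessors stash raw html
# and insert the returned placeholder into the text stream; the original
# markup is swapped back in by RawHtmlTextPostprocessor at the very end.
def _example_stash_usage():
    stash = HtmlStash()
    placeholder = stash.store("<video></video>", safe=True)
    return placeholder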
class BlockGuru:
def _findHead(self, lines, fn, allowBlank=0):
"""Functional magic to help determine boundaries of indented
blocks.
@param lines: an array of strings
@param fn: a function that returns a substring of a string
if the string matches the necessary criteria
@param allowBlank: specifies whether it's ok to have blank
lines between matching functions
@returns: a list of post processes items and the unused
remainder of the original list"""
items = []
item = -1
i = 0 # to keep track of where we are
for line in lines:
if not line.strip() and not allowBlank:
return items, lines[i:]
if not line.strip() and allowBlank:
# If we see a blank line, this _might_ be the end
i += 1
# Find the next non-blank line
for j in range(i, len(lines)):
if lines[j].strip():
next = lines[j]
break
else:
# There is no more text => this is the end
break
# Check if the next non-blank line is still a part of the list
part = fn(next)
if part:
items.append("")
continue
else:
break # found end of the list
part = fn(line)
if part:
items.append(part)
i += 1
continue
else:
return items, lines[i:]
else:
i += 1
return items, lines[i:]
def detabbed_fn(self, line):
""" An auxiliary method to be passed to _findHead """
m = RE.regExp['tabbed'].match(line)
if m:
return m.group(4)
else:
return None
def detectTabbed(self, lines):
return self._findHead(lines, self.detabbed_fn,
allowBlank = 1)
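# Illustrative example (never called): detectTabbed() splits off the leading
# indented block, detabbed, and returns the untouched remainder.
def _example_detect_tabbed():
    guru = BlockGuru()
    detabbed, rest = guru.detectTabbed(["    code line", "plain text"])
    # detabbed == ["code line"], rest == ["plain text"]
    return detabbed, rest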
def print_error(string):
"""Print an error string to stderr"""
sys.stderr.write(string +'\n')
def dequote(string):
""" Removes quotes from around a string """
if ( ( string.startswith('"') and string.endswith('"'))
or (string.startswith("'") and string.endswith("'")) ):
return string[1:-1]
else:
return string
"""
======================================================================
========================== CORE MARKDOWN =============================
======================================================================
This stuff is ugly, so if you are thinking of extending the syntax,
see first if you can do it via pre-processors, post-processors,
inline patterns or a combination of the three.
"""
class CorePatterns:
"""This class is scheduled for removal as part of a refactoring
effort."""
patterns = {
'header': r'(#*)([^#]*)(#*)', # # A title
'reference-def': r'(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)',
# [Google]: http://www.google.com/
'containsline': r'([-]*)$|^([=]*)', # -----, =====, etc.
'ol': r'[ ]{0,3}[\d]*\.\s+(.*)', # 1. text
'ul': r'[ ]{0,3}[*+-]\s+(.*)', # "* text"
'isline1': r'(\**)', # ***
'isline2': r'(\-*)', # ---
'isline3': r'(\_*)', # ___
'tabbed': r'((\t)|( ))(.*)', # an indented line
'quoted': r'> ?(.*)', # a quoted block ("> ...")
}
def __init__ (self):
self.regExp = {}
for key in self.patterns.keys():
self.regExp[key] = re.compile("^%s$" % self.patterns[key],
re.DOTALL)
self.regExp['containsline'] = re.compile(r'^([-]*)$|^([=]*)$', re.M)
RE = CorePatterns()
class Markdown:
""" Markdown formatter class for creating an html document from
Markdown text """
    def __init__(self, source=None, # deprecated
extensions=[],
extension_configs=None,
safe_mode = False):
"""Creates a new Markdown instance.
        @param source: The text in Markdown format. Deprecated!
        @param extensions: A list of extensions.
        @param extension_configs: Configuration settings for extensions.
@param safe_mode: Disallow raw html. """
self.source = source
if source is not None:
            message(WARN, "The `source` arg of Markdown.__init__() is deprecated and will be removed in the future. Use `instance.convert(source)` instead.")
self.safeMode = safe_mode
self.blockGuru = BlockGuru()
self.registeredExtensions = []
self.stripTopLevelTags = 1
self.docType = ""
self.textPreprocessors = [HTML_BLOCK_PREPROCESSOR]
self.preprocessors = [HEADER_PREPROCESSOR,
LINE_PREPROCESSOR,
# A footnote preprocessor will
# get inserted here
REFERENCE_PREPROCESSOR]
self.postprocessors = [] # a footnote postprocessor will get
# inserted later
self.textPostprocessors = [# a footnote postprocessor will get
# inserted here
RAWHTMLTEXTPOSTPROCESSOR]
self.prePatterns = []
self.inlinePatterns = [DOUBLE_BACKTICK_PATTERN,
BACKTICK_PATTERN,
ESCAPE_PATTERN,
REFERENCE_PATTERN,
LINK_ANGLED_PATTERN,
LINK_PATTERN,
IMAGE_LINK_PATTERN,
IMAGE_REFERENCE_PATTERN,
AUTOLINK_PATTERN,
AUTOMAIL_PATTERN,
LINE_BREAK_PATTERN_2,
LINE_BREAK_PATTERN,
HTML_PATTERN,
ENTITY_PATTERN,
NOT_STRONG_PATTERN,
STRONG_EM_PATTERN,
STRONG_EM_PATTERN_2,
STRONG_PATTERN,
STRONG_PATTERN_2,
EMPHASIS_PATTERN,
EMPHASIS_PATTERN_2
# The order of the handlers matters!!!
]
self.registerExtensions(extensions = extensions,
configs = extension_configs)
self.reset()
def registerExtensions(self, extensions, configs):
if not configs:
configs = {}
for module in extensions:
ext = module.__name__.split("_")[1]
if configs.has_key(ext):
configs_for_ext = configs[ext]
else:
configs_for_ext = []
extension = module.makeExtension(configs_for_ext)
extension.extendMarkdown(self, globals())
def registerExtension(self, extension):
""" This gets called by the extension """
self.registeredExtensions.append(extension)
def reset(self):
"""Resets all state variables so that we can start
with a new text."""
self.references={}
self.htmlStash = HtmlStash()
HTML_BLOCK_PREPROCESSOR.stash = self.htmlStash
LINE_PREPROCESSOR.stash = self.htmlStash
REFERENCE_PREPROCESSOR.references = self.references
HTML_PATTERN.stash = self.htmlStash
ENTITY_PATTERN.stash = self.htmlStash
REFERENCE_PATTERN.references = self.references
IMAGE_REFERENCE_PATTERN.references = self.references
RAWHTMLTEXTPOSTPROCESSOR.stash = self.htmlStash
RAWHTMLTEXTPOSTPROCESSOR.safeMode = self.safeMode
for extension in self.registeredExtensions:
extension.reset()
def _transform(self):
"""Transforms the Markdown text into a XHTML body document
@returns: A NanoDom Document """
# Setup the document
self.doc = Document()
self.top_element = self.doc.createElement("span")
self.top_element.appendChild(self.doc.createTextNode('\n'))
self.top_element.setAttribute('class', 'markdown')
self.doc.appendChild(self.top_element)
# Fixup the source text
text = self.source
text = text.replace("\r\n", "\n").replace("\r", "\n")
text += "\n\n"
text = text.expandtabs(TAB_LENGTH)
# Split into lines and run the preprocessors that will work with
# self.lines
self.lines = text.split("\n")
# Run the pre-processors on the lines
for prep in self.preprocessors :
self.lines = prep.run(self.lines)
# Create a NanoDom tree from the lines and attach it to Document
buffer = []
for line in self.lines:
if line.startswith("#"):
self._processSection(self.top_element, buffer)
buffer = [line]
else:
buffer.append(line)
self._processSection(self.top_element, buffer)
#self._processSection(self.top_element, self.lines)
# Not sure why I put this in but let's leave it for now.
self.top_element.appendChild(self.doc.createTextNode('\n'))
# Run the post-processors
for postprocessor in self.postprocessors:
postprocessor.run(self.doc)
return self.doc
def _processSection(self, parent_elem, lines,
inList = 0, looseList = 0):
"""Process a section of a source document, looking for high
level structural elements like lists, block quotes, code
        segments, html blocks, etc. Some of those then get stripped
of their high level markup (e.g. get unindented) and the
lower-level markup is processed recursively.
@param parent_elem: A NanoDom element to which the content
will be added
@param lines: a list of lines
@param inList: a level
@returns: None"""
# Loop through lines until none left.
while lines:
# Check if this section starts with a list, a blockquote or
# a code block
processFn = { 'ul': self._processUList,
'ol': self._processOList,
'quoted': self._processQuote,
'tabbed': self._processCodeBlock}
for regexp in ['ul', 'ol', 'quoted', 'tabbed']:
m = RE.regExp[regexp].match(lines[0])
if m:
processFn[regexp](parent_elem, lines, inList)
return
# We are NOT looking at one of the high-level structures like
# lists or blockquotes. So, it's just a regular paragraph
# (though perhaps nested inside a list or something else). If
# we are NOT inside a list, we just need to look for a blank
# line to find the end of the block. If we ARE inside a
# list, however, we need to consider that a sublist does not
# need to be separated by a blank line. Rather, the following
# markup is legal:
#
# * The top level list item
#
# Another paragraph of the list. This is where we are now.
# * Underneath we might have a sublist.
#
if inList:
start, lines = self._linesUntil(lines, (lambda line:
RE.regExp['ul'].match(line)
or RE.regExp['ol'].match(line)
or not line.strip()))
self._processSection(parent_elem, start,
inList - 1, looseList = looseList)
inList = inList-1
else: # Ok, so it's just a simple block
paragraph, lines = self._linesUntil(lines, lambda line:
not line.strip())
if len(paragraph) and paragraph[0].startswith('#'):
self._processHeader(parent_elem, paragraph)
elif paragraph:
self._processParagraph(parent_elem, paragraph,
inList, looseList)
if lines and not lines[0].strip():
lines = lines[1:] # skip the first (blank) line
def _processHeader(self, parent_elem, paragraph):
m = RE.regExp['header'].match(paragraph[0])
if m:
level = len(m.group(1))
h = self.doc.createElement("h%d" % level)
parent_elem.appendChild(h)
for item in self._handleInlineWrapper(m.group(2).strip()):
h.appendChild(item)
else:
message(CRITICAL, "We've got a problem header!")
def _processParagraph(self, parent_elem, paragraph, inList, looseList):
list = self._handleInlineWrapper("\n".join(paragraph))
if ( parent_elem.nodeName == 'li'
and not (looseList or parent_elem.childNodes)):
# If this is the first paragraph inside "li", don't
# put <p> around it - append the paragraph bits directly
# onto parent_elem
el = parent_elem
else:
# Otherwise make a "p" element
el = self.doc.createElement("p")
parent_elem.appendChild(el)
for item in list:
el.appendChild(item)
def _processUList(self, parent_elem, lines, inList):
self._processList(parent_elem, lines, inList,
listexpr='ul', tag = 'ul')
def _processOList(self, parent_elem, lines, inList):
self._processList(parent_elem, lines, inList,
listexpr='ol', tag = 'ol')
def _processList(self, parent_elem, lines, inList, listexpr, tag):
"""Given a list of document lines starting with a list item,
finds the end of the list, breaks it up, and recursively
processes each list item and the remainder of the text file.
@param parent_elem: A dom element to which the content will be added
@param lines: a list of lines
@param inList: a level
@returns: None"""
ul = self.doc.createElement(tag) # ul might actually be '<ol>'
parent_elem.appendChild(ul)
looseList = 0
# Make a list of list items
items = []
item = -1
i = 0 # a counter to keep track of where we are
for line in lines:
loose = 0
if not line.strip():
# If we see a blank line, this _might_ be the end of the list
i += 1
loose = 1
# Find the next non-blank line
for j in range(i, len(lines)):
if lines[j].strip():
next = lines[j]
break
else:
# There is no more text => end of the list
break
# Check if the next non-blank line is still a part of the list
if ( RE.regExp['ul'].match(next) or
RE.regExp['ol'].match(next) or
RE.regExp['tabbed'].match(next) ):
# get rid of any white space in the line
items[item].append(line.strip())
looseList = loose or looseList
continue
else:
break # found end of the list
# Now we need to detect list items (at the current level)
            # while also detabbing child elements if necessary
for expr in ['ul', 'ol', 'tabbed']:
m = RE.regExp[expr].match(line)
if m:
if expr in ['ul', 'ol']: # We are looking at a new item
#if m.group(1) :
# Removed the check to allow for a blank line
# at the beginning of the list item
items.append([m.group(1)])
item += 1
elif expr == 'tabbed': # This line needs to be detabbed
items[item].append(m.group(4)) #after the 'tab'
i += 1
break
else:
items[item].append(line) # Just regular continuation
i += 1 # added on 2006.02.25
else:
i += 1
# Add the dom elements
for item in items:
li = self.doc.createElement("li")
ul.appendChild(li)
self._processSection(li, item, inList + 1, looseList = looseList)
# Process the remaining part of the section
self._processSection(parent_elem, lines[i:], inList)
def _linesUntil(self, lines, condition):
""" A utility function to break a list of lines upon the
        first line that satisfies a condition. The condition
argument should be a predicate function.
"""
i = -1
for line in lines:
i += 1
if condition(line): break
else:
i += 1
return lines[:i], lines[i:]
def _processQuote(self, parent_elem, lines, inList):
"""Given a list of document lines starting with a quote finds
the end of the quote, unindents it and recursively
processes the body of the quote and the remainder of the
text file.
@param parent_elem: DOM element to which the content will be added
@param lines: a list of lines
@param inList: a level
@returns: None """
dequoted = []
i = 0
blank_line = False # allow one blank line between paragraphs
for line in lines:
m = RE.regExp['quoted'].match(line)
if m:
dequoted.append(m.group(1))
i += 1
blank_line = False
elif not blank_line and line.strip() != '':
dequoted.append(line)
i += 1
elif not blank_line and line.strip() == '':
dequoted.append(line)
i += 1
blank_line = True
else:
break
blockquote = self.doc.createElement('blockquote')
parent_elem.appendChild(blockquote)
self._processSection(blockquote, dequoted, inList)
self._processSection(parent_elem, lines[i:], inList)
def _processCodeBlock(self, parent_elem, lines, inList):
"""Given a list of document lines starting with a code block
finds the end of the block, puts it into the dom verbatim
wrapped in ("<pre><code>") and recursively processes the
the remainder of the text file.
@param parent_elem: DOM element to which the content will be added
@param lines: a list of lines
@param inList: a level
@returns: None"""
detabbed, theRest = self.blockGuru.detectTabbed(lines)
pre = self.doc.createElement('pre')
code = self.doc.createElement('code')
parent_elem.appendChild(pre)
pre.appendChild(code)
text = "\n".join(detabbed).rstrip()+"\n"
#text = text.replace("&", "&")
code.appendChild(self.doc.createTextNode(text))
self._processSection(parent_elem, theRest, inList)
def _handleInlineWrapper (self, line, patternIndex=0):
parts = [line]
while patternIndex < len(self.inlinePatterns):
i = 0
while i < len(parts):
x = parts[i]
if isinstance(x, (str, unicode)):
result = self._applyPattern(x, \
self.inlinePatterns[patternIndex], \
patternIndex)
if result:
i -= 1
parts.remove(x)
for y in result:
parts.insert(i+1,y)
i += 1
patternIndex += 1
for i in range(len(parts)):
x = parts[i]
if isinstance(x, (str, unicode)):
parts[i] = self.doc.createTextNode(x)
return parts
def _handleInline(self, line):
"""Transform a Markdown line with inline elements to an XHTML
fragment.
This function uses auxiliary objects called inline patterns.
See notes on inline patterns above.
@param item: A block of Markdown text
@return: A list of NanoDom nodes """
if not(line):
return [self.doc.createTextNode(' ')]
for pattern in self.inlinePatterns:
list = self._applyPattern( line, pattern)
if list: return list
return [self.doc.createTextNode(line)]
def _applyPattern(self, line, pattern, patternIndex):
""" Given a pattern name, this function checks if the line
fits the pattern, creates the necessary elements, and returns
back a list consisting of NanoDom elements and/or strings.
@param line: the text to be processed
@param pattern: the pattern to be checked
@returns: the appropriate newly created NanoDom element if the
pattern matches, None otherwise.
"""
# match the line to pattern's pre-compiled reg exp.
# if no match, move on.
m = pattern.getCompiledRegExp().match(line)
if not m:
return None
# if we got a match let the pattern make us a NanoDom node
# if it doesn't, move on
node = pattern.handleMatch(m, self.doc)
# check if any of this nodes have children that need processing
if isinstance(node, Element):
if not node.nodeName in ["code", "pre"]:
for child in node.childNodes:
if isinstance(child, TextNode):
result = self._handleInlineWrapper(child.value, patternIndex+1)
if result:
if result == [child]:
continue
result.reverse()
#to make insertion easier
position = node.childNodes.index(child)
node.removeChild(child)
for item in result:
if isinstance(item, (str, unicode)):
if len(item) > 0:
node.insertChild(position,
self.doc.createTextNode(item))
else:
node.insertChild(position, item)
if node:
# Those are in the reverse order!
            return ( m.groups()[-1], # the string to the right of the match
                     node, # the new node
                     m.group(1)) # the string to the left of the match
else:
return None
def convert (self, source = None):
"""Return the document in XHTML format.
@returns: A serialized XHTML body."""
if source is not None: #Allow blank string
self.source = source
if not self.source:
return u""
try:
self.source = unicode(self.source)
except UnicodeDecodeError:
message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')
return u""
for pp in self.textPreprocessors:
self.source = pp.run(self.source)
doc = self._transform()
xml = doc.toxml()
# Return everything but the top level tag
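        # (the slice removes the 23-character '<span class="markdown">'
        # wrapper and the trailing '</span>')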
if self.stripTopLevelTags:
xml = xml.strip()[23:-7] + "\n"
for pp in self.textPostprocessors:
xml = pp.run(xml)
return (self.docType + xml).strip()
def __str__(self):
''' Report info about instance. Markdown always returns unicode. '''
if self.source is None:
            status = 'in which no source text has been assigned.'
else:
status = 'which contains %d chars and %d line(s) of source.'%\
(len(self.source), self.source.count('\n')+1)
return 'An instance of "%s" %s'% (self.__class__, status)
__unicode__ = convert # markdown should always return a unicode string
# ====================================================================
def markdownFromFile(input = None,
output = None,
extensions = [],
encoding = None,
message_threshold = CRITICAL,
safe = False):
global console_hndlr
console_hndlr.setLevel(message_threshold)
message(DEBUG, "input file: %s" % input)
if not encoding:
encoding = "utf-8"
input_file = codecs.open(input, mode="r", encoding=encoding)
text = input_file.read()
input_file.close()
text = removeBOM(text, encoding)
new_text = markdown(text, extensions, safe_mode = safe)
if output:
output_file = codecs.open(output, "w", encoding=encoding)
output_file.write(new_text)
output_file.close()
else:
sys.stdout.write(new_text.encode(encoding))
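# Illustrative call (hypothetical file names):
#   markdownFromFile(input="page.txt", output="page.html", encoding="utf-8")
# reads and converts through the codecs module; without `output` the result
# is written to stdout instead.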
def markdown(text,
extensions = [],
safe_mode = False):
message(DEBUG, "in markdown.markdown(), received text:\n%s" % text)
extension_names = []
extension_configs = {}
for ext in extensions:
pos = ext.find("(")
if pos == -1:
extension_names.append(ext)
else:
name = ext[:pos]
extension_names.append(name)
pairs = [x.split("=") for x in ext[pos+1:-1].split(",")]
configs = [(x.strip(), y.strip()) for (x, y) in pairs]
extension_configs[name] = configs
md = Markdown(extensions=extension_names,
extension_configs=extension_configs,
safe_mode = safe_mode)
return md.convert(text)
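# Illustrative usage (never called): the Markdown class is driven directly
# with extension *modules* (objects exposing makeExtension), as done
# elsewhere in lifeflow; the markdown() helper above first parses
# "name(key=value,...)" strings into names and config pairs.
def _example_convert():
    md = Markdown(safe_mode=False)
    return md.convert(u"Some *emphasized* text")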
class Extension:
def __init__(self, configs = {}):
self.config = configs
def getConfig(self, key):
if self.config.has_key(key):
return self.config[key][0]
else:
return ""
def getConfigInfo(self):
return [(key, self.config[key][1]) for key in self.config.keys()]
def setConfig(self, key, value):
self.config[key][0] = value
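# Illustrative sketch (never called): each config key maps to a
# [value, description] pair, so getConfig() returns element 0 and
# getConfigInfo() exposes the descriptions.
def _example_extension_config():
    ext = Extension(configs={"PLACE_MARKER": ["///Here///", "marker text"]})
    return ext.getConfig("PLACE_MARKER") # -> "///Here///"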
OPTPARSE_WARNING = """
Python 2.3 or higher required for advanced command line options.
For lower versions of Python use:
%s INPUT_FILE > OUTPUT_FILE
""" % EXECUTABLE_NAME_FOR_USAGE
def parse_options():
try:
optparse = __import__("optparse")
except:
if len(sys.argv) == 2:
return {'input': sys.argv[1],
'output': None,
'message_threshold': CRITICAL,
'safe': False,
'extensions': [],
'encoding': None }
else:
print OPTPARSE_WARNING
return None
parser = optparse.OptionParser(usage="%prog INPUTFILE [options]")
parser.add_option("-f", "--file", dest="filename",
help="write output to OUTPUT_FILE",
metavar="OUTPUT_FILE")
parser.add_option("-e", "--encoding", dest="encoding",
help="encoding for input and output files",)
parser.add_option("-q", "--quiet", default = CRITICAL,
action="store_const", const=60, dest="verbose",
help="suppress all messages")
parser.add_option("-v", "--verbose",
action="store_const", const=INFO, dest="verbose",
help="print info messages")
parser.add_option("-s", "--safe", dest="safe", default=False,
metavar="SAFE_MODE",
help="same mode ('replace', 'remove' or 'escape' user's HTML tag)")
parser.add_option("--noisy",
action="store_const", const=DEBUG, dest="verbose",
help="print debug messages")
parser.add_option("-x", "--extension", action="append", dest="extensions",
help = "load extension EXTENSION", metavar="EXTENSION")
(options, args) = parser.parse_args()
if not len(args) == 1:
parser.print_help()
return None
else:
input_file = args[0]
if not options.extensions:
options.extensions = []
return {'input': input_file,
'output': options.filename,
'message_threshold': options.verbose,
'safe': options.safe,
'extensions': options.extensions,
'encoding': options.encoding }
if __name__ == '__main__':
""" Run Markdown from the command line. """
options = parse_options()
#if os.access(inFile, os.R_OK):
if not options:
sys.exit(0)
markdownFromFile(**options)
########NEW FILE########
__FILENAME__ = mdx_code
import re
from lifeflow.markdown import markdown
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
class CodeExtension (markdown.Extension):
def __name__(self):
return u"code"
def extendMarkdown(self, md, md_global):
preprocessor = CodeBlockPreprocessor()
preprocessor.md = md
md.textPreprocessors.insert(0, preprocessor)
CODE_BLOCK_REGEX = re.compile(r"\r?\n(?P<spaces>[ ]*)(?P<fence>@{2,})[ ]*(?P<syntax>[a-zA-Z0-9_+-]+)[ ]*(?P<linenos>[a-zA-Z]*)[ ]*\r?\n(?P<code>.*?)(?P=fence)[ ]*\r?\n?$", re.DOTALL | re.MULTILINE)
class CodeBlockPreprocessor :
def run (self, text):
while 1:
m = CODE_BLOCK_REGEX.search(text)
            if not m: break
spaces = len(m.group('spaces'))
lexer = get_lexer_by_name(m.group('syntax'))
linenos = m.group('linenos')
unspaced = [x[spaces:] for x in re.split('\r?\n', m.group('code'))]
color = highlight("\n".join(unspaced), lexer, HtmlFormatter(linenos=linenos))
placeholder = self.md.htmlStash.store(color, safe=True)
text = '%s\n%s\n%s'% (text[:m.start()], (' '*spaces)+placeholder, text[m.end():])
return text
def makeExtension(configs=None) :
return CodeExtension(configs=configs)
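# Example input handled by CodeBlockPreprocessor (illustrative):
#
#     @@ python
#     print "highlighted via pygments"
#     @@
#
# The fence is two or more "@" characters; a pygments lexer name follows the
# opening fence, optionally followed by a linenos flag.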
########NEW FILE########
__FILENAME__ = mdx_footnotes
"""
========================= FOOTNOTES =================================
This section adds footnote handling to markdown. It can be used as
an example for extending python-markdown with relatively complex
functionality. While in this case the extension is included inside
the module itself, it could just as easily be added from outside the
module. Note that all markdown classes above are ignorant about
footnotes. All footnote functionality is provided separately and
then added to the markdown instance at run time.
Footnote functionality is attached by calling extendMarkdown()
method of FootnoteExtension. The method also registers the
extension to allow its state to be reset by a call to reset()
method.
"""
FN_BACKLINK_TEXT = "zz1337820767766393qq"
from lifeflow.markdown import markdown
import re, random
class FootnoteExtension (markdown.Extension):
DEF_RE = re.compile(r'(\ ?\ ?\ ?)\[\^([^\]]*)\]:\s*(.*)')
SHORT_USE_RE = re.compile(r'\[\^([^\]]*)\]', re.M) # [^a]
def __init__ (self, configs) :
self.config = {'PLACE_MARKER' :
["///Footnotes Go Here///",
"The text string that marks where the footnotes go"]}
for key, value in configs :
self.config[key][0] = value
self.reset()
def __name__(self):
return u"footnotes"
def extendMarkdown(self, md, md_globals) :
self.md = md
# Stateless extensions do not need to be registered
md.registerExtension(self)
# Insert a preprocessor before ReferencePreprocessor
index = md.preprocessors.index(md_globals['REFERENCE_PREPROCESSOR'])
preprocessor = FootnotePreprocessor(self)
preprocessor.md = md
md.preprocessors.insert(index, preprocessor)
# Insert an inline pattern before ImageReferencePattern
FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
index = md.inlinePatterns.index(md_globals['IMAGE_REFERENCE_PATTERN'])
md.inlinePatterns.insert(index, FootnotePattern(FOOTNOTE_RE, self))
# Insert a post-processor that would actually add the footnote div
postprocessor = FootnotePostprocessor(self)
postprocessor.extension = self
md.postprocessors.append(postprocessor)
textPostprocessor = FootnoteTextPostprocessor(self)
md.textPostprocessors.append(textPostprocessor)
def reset(self) :
        # May be called by Markdown if state reset is desired
self.footnote_suffix = "-" + str(int(random.random()*1000000000))
self.used_footnotes={}
self.footnotes = {}
def findFootnotesPlaceholder(self, doc) :
def findFootnotePlaceholderFn(node=None, indent=0):
if node.type == 'text':
if node.value.find(self.getConfig("PLACE_MARKER")) > -1 :
return True
fn_div_list = doc.find(findFootnotePlaceholderFn)
if fn_div_list :
return fn_div_list[0]
def setFootnote(self, id, text) :
self.footnotes[id] = text
def makeFootnoteId(self, num) :
return 'fn%d%s' % (num, self.footnote_suffix)
def makeFootnoteRefId(self, num) :
return 'fnr%d%s' % (num, self.footnote_suffix)
def makeFootnotesDiv (self, doc) :
"""Creates the div with class='footnote' and populates it with
the text of the footnotes.
@returns: the footnote div as a dom element """
if not self.footnotes.keys() :
return None
div = doc.createElement("div")
div.setAttribute('class', 'footnote')
hr = doc.createElement("hr")
div.appendChild(hr)
ol = doc.createElement("ol")
div.appendChild(ol)
footnotes = [(self.used_footnotes[id], id)
for id in self.footnotes.keys()]
footnotes.sort()
for i, id in footnotes :
li = doc.createElement('li')
li.setAttribute('id', self.makeFootnoteId(i))
self.md._processSection(li, self.footnotes[id].split("\n"), looseList=1)
#li.appendChild(doc.createTextNode(self.footnotes[id]))
backlink = doc.createElement('a')
backlink.setAttribute('href', '#' + self.makeFootnoteRefId(i))
backlink.setAttribute('class', 'footnoteBackLink')
backlink.setAttribute('title',
                                  'Jump back to footnote %d in the text' % i)
backlink.appendChild(doc.createTextNode(FN_BACKLINK_TEXT))
if li.childNodes :
node = li.childNodes[-1]
if node.type == "text" :
li.appendChild(backlink)
elif node.nodeName == "p":
node.appendChild(backlink)
else:
p = doc.createElement('p')
p.appendChild(backlink)
li.appendChild(p)
ol.appendChild(li)
return div
class FootnotePreprocessor :
def __init__ (self, footnotes) :
self.footnotes = footnotes
def run(self, lines) :
self.blockGuru = markdown.BlockGuru()
lines = self._handleFootnoteDefinitions (lines)
# Make a hash of all footnote marks in the text so that we
# know in what order they are supposed to appear. (This
# function call doesn't really substitute anything - it's just
        # a way to get a callback for each occurrence.)
text = "\n".join(lines)
self.footnotes.SHORT_USE_RE.sub(self.recordFootnoteUse, text)
return text.split("\n")
def recordFootnoteUse(self, match) :
id = match.group(1)
id = id.strip()
nextNum = len(self.footnotes.used_footnotes.keys()) + 1
self.footnotes.used_footnotes[id] = nextNum
def _handleFootnoteDefinitions(self, lines) :
"""Recursively finds all footnote definitions in the lines.
@param lines: a list of lines of text
@returns: a string representing the text with footnote
definitions removed """
i, id, footnote = self._findFootnoteDefinition(lines)
if id :
plain = lines[:i]
detabbed, theRest = self.blockGuru.detectTabbed(lines[i+1:])
self.footnotes.setFootnote(id,
footnote + "\n"
+ "\n".join(detabbed))
more_plain = self._handleFootnoteDefinitions(theRest)
return plain + [""] + more_plain
else :
return lines
def _findFootnoteDefinition(self, lines) :
"""Finds the first line of a footnote definition.
@param lines: a list of lines of text
@returns: the index of the line containing a footnote definition """
counter = 0
for line in lines :
m = self.footnotes.DEF_RE.match(line)
if m :
return counter, m.group(2), m.group(3)
counter += 1
return counter, None, None
class FootnotePattern (markdown.Pattern) :
def __init__ (self, pattern, footnotes) :
markdown.Pattern.__init__(self, pattern)
self.footnotes = footnotes
def handleMatch(self, m, doc) :
sup = doc.createElement('sup')
a = doc.createElement('a')
sup.appendChild(a)
id = m.group(2)
num = self.footnotes.used_footnotes[id]
sup.setAttribute('id', self.footnotes.makeFootnoteRefId(num))
a.setAttribute('href', '#' + self.footnotes.makeFootnoteId(num))
a.appendChild(doc.createTextNode(str(num)))
return sup
class FootnotePostprocessor (markdown.Postprocessor):
def __init__ (self, footnotes) :
self.footnotes = footnotes
def run(self, doc) :
footnotesDiv = self.footnotes.makeFootnotesDiv(doc)
if footnotesDiv :
fnPlaceholder = self.extension.findFootnotesPlaceholder(doc)
if fnPlaceholder :
fnPlaceholder.parent.replaceChild(fnPlaceholder, footnotesDiv)
else :
doc.documentElement.appendChild(footnotesDiv)
class FootnoteTextPostprocessor (markdown.Postprocessor):
def __init__ (self, footnotes) :
self.footnotes = footnotes
def run(self, text) :
        return text.replace(FN_BACKLINK_TEXT, "&#8617;")
def makeExtension(configs=None) :
return FootnoteExtension(configs=configs)
########NEW FILE########
__FILENAME__ = mdx_foreign_formats
import re
from lifeflow.markdown import markdown
def smart_str(s, encoding='utf-8', errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
Borrowed and simplified for this purpose from `django.utils.encoding`.
"""
if not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
class ForeignFormatsExtension (markdown.Extension):
def __name__(self):
return u"foreign formats"
def extendMarkdown(self, md, md_global):
preprocessor = ForeignFormatsBlockPreprocessor()
preprocessor.md = md
md.textPreprocessors.insert(0, preprocessor)
FORMATTERS = {}
# Attempt to import textile formatter.
try:
# http://dealmeida.net/projects/textile/
import textile
def func(x):
return textile.textile(smart_str(x), encoding='utf-8', output='utf-8')
FORMATTERS["textile"] = func
except ImportError:
pass
# Attempt to import docutiles (ReST) formatter.
try:
# http://docutils.sf.net/
from docutils.core import publish_parts
def func(x):
return publish_parts(source=x,writer_name="html4css1")["fragment"]
FORMATTERS["rest"] = func
except ImportError:
pass
FOREIGN_FORMAT_BLOCK_REGEX = re.compile(r"^~~~(?P<format>\w*)\r?\n(?P<txt>.*?)^~~~$", re.DOTALL|re.MULTILINE)
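# Example block handled by this preprocessor (illustrative):
#
#     ~~~rest
#     reStructuredText rendered with docutils when it is installed.
#     ~~~
#
# Blocks whose format has no available formatter are left unrendered but
# are still stashed verbatim.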
class ForeignFormatsBlockPreprocessor :
def run (self, text):
while 1:
m = FOREIGN_FORMAT_BLOCK_REGEX.search(text)
            if not m: break
format = m.group('format').lower()
txt = m.group('txt')
if FORMATTERS.has_key(format):
func = FORMATTERS[format]
txt = func(txt)
placeholder = self.md.htmlStash.store(txt, safe=True)
text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():])
return text
def makeExtension(configs=None) :
return ForeignFormatsExtension(configs=configs)
########NEW FILE########
__FILENAME__ = mdx_lifeflow
import re
from lifeflow.markdown import markdown
import lifeflow.models
class LifeflowExtension (markdown.Extension):
def __init__(self, entry):
self.entry = entry
def extendMarkdown(self, md, md_globals):
preprocessor = LifeflowPreprocessor(self.entry)
preprocessor.md = md
md.preprocessors.insert(0, preprocessor)
def reset(self):
pass
def make_syntax():
# note that the key is a tuple of the number of arguments,
# and the name of the reference before the first space.
# for example [refer year name] would be (2, u"refer")
# and [absurd] would be (0, u"absurd")
# the value is a function that accepts
# entry, str, and then N additional parameters where
# N is equal to the number of args specified in the
# tuple
# [this is my author bio][author]
def author(entry, str):
authors = entry.authors.all()
if len(authors) == 1:
return str % authors[0].get_absolute_url()
else:
return str % u"/author/"
# [this is the lifeflow tag ][tag lifeflow]
def tag(entry, str, slug):
t = lifeflow.models.Tag.objects.get(slug=slug)
return str % t.get_absolute_url()
# [this is the comment with primary key 123][comment 123]
def comment(entry, str, pk):
c = lifeflow.models.Comment.objects.get(pk=int(pk))
return str % c.get_absolute_url()
# [this is the project with slug magic-wand][project magic-wand]
def project(entry, str, slug):
p = lifeflow.models.Project.objects.get(slug=slug)
return str % p.get_absolute_url()
# [remember my previous entry?][previous]
def previous(entry, str):
if entry.__class__.__name__ == "Entry":
prev = entry.get_previous_article()
if prev is None:
return None
return str % prev.get_absolute_url()
# [Update: I clarified this in the next entry!][next]
def next(entry, str):
if entry.__class__.__name__ == "Entry":
nxt = entry.get_next_article()
if nxt is None:
return None
return str % nxt.get_absolute_url()
# [Check out the first entry in this series][series 1]
# [or the second entry!][series 2]
def series_number(entry, str, nth):
try:
nth = int(nth)
if nth > 0:
nth = nth - 1
except ValueError:
return None
series = entry.series.all()[0]
if series:
try:
e = series.entry_set.all().order_by('pub_date')[nth]
return str % e.get_absolute_url()
except IndexError:
return None
# [Remember the Two-Faced Django series?][series two_faced 1]
# [Well, I wrote that too! Go me.][series jet-survival 3]
def series_slug_number(entry, str, slug, nth):
try:
nth = int(nth)
if nth > 0:
nth = nth - 1
except ValueError:
return None
try:
series = lifeflow.models.Series.objects.get(slug=slug)
except lifeflow.models.Series.DoesNotExist:
return None
try:
e = series.entry_set.all()[nth]
return str % e.get_absolute_url()
except IndexError:
return None
# [and check out this code!][file the_name]
# ![ a picture that I really like][file my_pic]
    # ![ and you can abbreviate it][f my_pic]
# [this way too][f my_code]
def file(entry, str, name):
try:
resource = lifeflow.models.Resource.objects.get(markdown_id=name)
return str % resource.get_relative_url()
except lifeflow.models.Resource.DoesNotExist:
return None
# [I like markdown][history md]
# [and talk about why the lucky stiff occasionally][history why]
# [but history is long... so...][h why]
# [and a link to my svn][h svn_lethain]
def history(entry, str, name):
pass
syntax = {}
syntax[(0, u"previous")] = previous
syntax[(0, u"next")] = next
syntax[(0, u"author")] = author
syntax[(1, u"file")] = file
syntax[(1, u"f")] = file
syntax[(1, u"tag")] = tag
syntax[(1, u"comment")] = comment
syntax[(1, u"project")] = project
syntax[(1, u"series")] = series_number
syntax[(2, u"series")] = series_slug_number
return syntax
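# Illustrative dispatch example (never called; "django" is a hypothetical
# slug): a reference like [see this][tag django] carries one argument after
# the name "tag", so it is looked up under the key (1, u"tag") and expanded
# into a markdown reference definition such as "[tag django]: /tags/django/".
def _example_dispatch(entry):
    syntax = make_syntax()
    func = syntax[(1, u"tag")]
    return func(entry, u"[tag django]: %s", u"django")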
class LifeflowPreprocessor :
def __init__(self, entry):
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
LIFEFLOW_RE = BRK + r'\s*\[([^\]]*)\]'
self.LIFEFLOW_RE = re.compile(LIFEFLOW_RE)
self.entry = entry
self.tags = {}
self.syntax = make_syntax()
def process_dynamic(self, ref):
# if tag has already been built, ignore
if self.tags.has_key(ref):
return None
parts = ref.split(u" ")
name = parts[0]
args = parts[1:]
length = len(args)
format = u"[%s]: %s" % (ref, u"%s")
try:
func = self.syntax[(length, name)]
result = func(self.entry, format, *args)
self.tags[ref] = True
return result
except KeyError:
self.tags[ref] = False
            return None
def build_static_references(self):
raw_refs = ((u'comments', u"#comments", u"Comments"),
(u'projects', u"/projects/", "Projects"),
(u'series', u"/articles/", "Series"),
(u'tags', u"/tags/", "Tags"))
refs = [ u'[%s]: %s "%s"' % (x[0], x[1], x[2]) for x in raw_refs ]
return refs
def run (self, lines):
def clean(match):
return match[-1]
text = u"\n".join(lines)
refs = self.LIFEFLOW_RE.findall(text)
cleaned = [ clean(x) for x in refs ]
processed = [ self.process_dynamic(x) for x in cleaned]
dynamic_refs = [ x for x in processed if x is not None ]
static_refs = self.build_static_references()
return static_refs + dynamic_refs + lines
def makeExtension(configs=None) :
return LifeflowExtension(configs)
########NEW FILE########
__FILENAME__ = mdx_rss
import markdown
DEFAULT_URL = "http://www.freewisdom.org/projects/python-markdown/"
DEFAULT_CREATOR = "Yuri Takhteyev"
DEFAULT_TITLE = "Markdown in Python"
GENERATOR = "http://www.freewisdom.org/projects/python-markdown/markdown2rss"
month_map = { "Jan" : "01",
"Feb" : "02",
"March" : "03",
"April" : "04",
"May" : "05",
"June" : "06",
"July" : "07",
"August" : "08",
"September" : "09",
"October" : "10",
"November" : "11",
"December" : "12" }
def get_time(heading) :
heading = heading.split("-")[0]
heading = heading.strip().replace(",", " ").replace(".", " ")
month, date, year = heading.split()
month = month_map[month]
return rdftime(" ".join((month, date, year, "12:00:00 AM")))
def rdftime(time) :
time = time.replace(":", " ")
time = time.replace("/", " ")
time = time.split()
return "%s-%s-%sT%s:%s:%s-08:00" % (time[0], time[1], time[2],
time[3], time[4], time[5])
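# Illustrative example: rdftime("01 15 2006 12:00:00 AM") returns
# "01-15-2006T12:00:00-08:00"; fields are reused in the order given and the
# -08:00 offset is hard-coded.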
def get_date(text) :
return "date"
class RssExtension (markdown.Extension):
def extendMarkdown(self, md, md_globals) :
self.config = { 'URL' : [DEFAULT_URL, "Main URL"],
'CREATOR' : [DEFAULT_CREATOR, "Feed creator's name"],
'TITLE' : [DEFAULT_TITLE, "Feed title"] }
md.xml_mode = True
# Insert a post-processor that would actually add the title tag
postprocessor = RssPostProcessor(self)
postprocessor.ext = self
md.postprocessors.append(postprocessor)
md.stripTopLevelTags = 0
md.docType = '<?xml version="1.0" encoding="utf-8"?>\n'
class RssPostProcessor (markdown.Postprocessor):
def __init__(self, md) :
pass
def run (self, doc) :
oldDocElement = doc.documentElement
rss = doc.createElement("rss")
rss.setAttribute('version', '2.0')
doc.appendChild(rss)
channel = doc.createElement("channel")
rss.appendChild(channel)
for tag, text in (("title", self.ext.getConfig("TITLE")),
("link", self.ext.getConfig("URL")),
("description", None)):
channel.appendChild(doc.createElement(tag, textNode = text))
        item = None # no <item> yet; paragraphs before the first heading are skipped
        for child in oldDocElement.childNodes :
if child.type == "element" :
if child.nodeName in ["h1", "h2", "h3", "h4", "h5"] :
heading = child.childNodes[0].value.strip()
item = doc.createElement("item")
channel.appendChild(item)
item.appendChild(doc.createElement("link",
self.ext.getConfig("URL")))
item.appendChild(doc.createElement("title", heading))
guid = ''.join([x for x in heading if x.isalnum()])
guidElem = doc.createElement("guid", guid)
guidElem.setAttribute("isPermaLink", "false")
item.appendChild(guidElem)
elif child.nodeName in ["p"] :
description = doc.createElement("description")
content = "\n".join([node.toxml()
for node in child.childNodes])
cdata = doc.createCDATA(content)
description.appendChild(cdata)
if item :
item.appendChild(description)
def makeExtension(configs) :
return RssExtension(configs)
########NEW FILE########
__FILENAME__ = odt2txt
"""
ODT2TXT
=======
ODT2TXT converts files in Open Document Text format (ODT) into
Markdown-formatted plain text.
Written by [Yuri Takhteyev](http://www.freewisdom.org).
Project website: http://www.freewisdom.org/projects/python-markdown/odt2txt.php
Contact: yuri [at] freewisdom.org
License: GPL 2 (http://www.gnu.org/copyleft/gpl.html) or BSD
Version: 0.1 (April 7, 2006)
"""
import sys, zipfile, xml.dom.minidom
IGNORED_TAGS = ["office:annotation"]
FOOTNOTE_STYLES = ["Footnote"]
class TextProps :
""" Holds properties for a text style. """
def __init__ (self):
self.italic = False
self.bold = False
self.fixed = False
def setItalic (self, value) :
if value == "italic" :
self.italic = True
def setBold (self, value) :
if value == "bold" :
self.bold = True
def setFixed (self, value) :
self.fixed = value
def __str__ (self) :
return "[i=%s, h=i%s, fixed=%s]" % (str(self.italic),
str(self.bold),
str(self.fixed))
class ParagraphProps :
""" Holds properties of a paragraph style. """
def __init__ (self):
self.blockquote = False
self.headingLevel = 0
self.code = False
self.title = False
self.indented = 0
def setIndented (self, value) :
self.indented = value
def setHeading (self, level) :
self.headingLevel = level
def setTitle (self, value):
self.title = value
def setCode (self, value) :
self.code = value
def __str__ (self) :
return "[bq=%s, h=%d, code=%s]" % (str(self.blockquote),
self.headingLevel,
str(self.code))
class ListProperties :
""" Holds properties for a list style. """
def __init__ (self):
self.ordered = False
def setOrdered (self, value) :
self.ordered = value
class OpenDocumentTextFile :
def __init__ (self, filepath) :
self.footnotes = []
self.footnoteCounter = 0
self.textStyles = {"Standard" : TextProps()}
self.paragraphStyles = {"Standard" : ParagraphProps()}
self.listStyles = {}
self.fixedFonts = []
self.hasTitle = 0
self.load(filepath)
def processFontDeclarations (self, fontDecl) :
""" Extracts necessary font information from a font-declaration
element.
"""
for fontFace in fontDecl.getElementsByTagName("style:font-face") :
if fontFace.getAttribute("style:font-pitch") == "fixed" :
self.fixedFonts.append(fontFace.getAttribute("style:name"))
def extractTextProperties (self, style, parent=None) :
""" Extracts text properties from a style element. """
textProps = TextProps()
if parent :
parentProp = self.textStyles.get(parent, None)
if parentProp :
                textProps = parentProp
textPropEl = style.getElementsByTagName("style:text-properties")
if not textPropEl : return textProps
textPropEl = textPropEl[0]
italic = textPropEl.getAttribute("fo:font-style")
bold = textPropEl.getAttribute("fo:font-weight")
textProps.setItalic(italic)
textProps.setBold(bold)
if textPropEl.getAttribute("style:font-name") in self.fixedFonts :
textProps.setFixed(True)
return textProps
def extractParagraphProperties (self, style, parent=None) :
""" Extracts paragraph properties from a style element. """
paraProps = ParagraphProps()
name = style.getAttribute("style:name")
if name.startswith("Heading_20_") :
level = name[11:]
try :
level = int(level)
paraProps.setHeading(level)
except :
level = 0
if name == "Title" :
paraProps.setTitle(True)
paraPropEl = style.getElementsByTagName("style:paragraph-properties")
if paraPropEl :
paraPropEl = paraPropEl[0]
leftMargin = paraPropEl.getAttribute("fo:margin-left")
if leftMargin :
try :
leftMargin = float(leftMargin[:-2])
if leftMargin > 0.01 :
paraProps.setIndented(True)
except :
pass
textProps = self.extractTextProperties(style)
if textProps.fixed :
paraProps.setCode(True)
return paraProps
def processStyles(self, styleElements) :
""" Runs through "style" elements extracting necessary information.
"""
for style in styleElements :
name = style.getAttribute("style:name")
if name == "Standard" : continue
family = style.getAttribute("style:family")
parent = style.getAttribute("style:parent-style-name")
if family == "text" :
self.textStyles[name] = self.extractTextProperties(style,
parent)
elif family == "paragraph":
self.paragraphStyles[name] = (
self.extractParagraphProperties(style,
parent))
def processListStyles (self, listStyleElements) :
for style in listStyleElements :
name = style.getAttribute("style:name")
prop = ListProperties()
if style.childNodes :
if ( style.childNodes[0].tagName
== "text:list-level-style-number" ) :
prop.setOrdered(True)
self.listStyles[name] = prop
def load(self, filepath) :
""" Loads an ODT file. """
zip = zipfile.ZipFile(filepath)
styles_doc = xml.dom.minidom.parseString(zip.read("styles.xml"))
self.processFontDeclarations(styles_doc.getElementsByTagName(
"office:font-face-decls")[0])
self.processStyles(styles_doc.getElementsByTagName("style:style"))
self.processListStyles(styles_doc.getElementsByTagName(
"text:list-style"))
self.content = xml.dom.minidom.parseString(zip.read("content.xml"))
self.processFontDeclarations(self.content.getElementsByTagName(
"office:font-face-decls")[0])
self.processStyles(self.content.getElementsByTagName("style:style"))
self.processListStyles(self.content.getElementsByTagName(
"text:list-style"))
def compressCodeBlocks(self, text) :
""" Removes extra blank lines from code blocks. """
lines = text.split("\n")
buffer = ""
numLines = len(lines)
for i in range(numLines) :
if (lines[i].strip() or i == numLines-1 or i == 0 or
not ( lines[i-1].startswith(" ")
and lines[i+1].startswith(" ") ) ):
buffer += "\n" + lines[i]
return buffer
def listToString (self, listElement) :
buffer = ""
styleName = listElement.getAttribute("text:style-name")
props = self.listStyles.get(styleName, ListProperties())
i = 0
for item in listElement.childNodes :
i += 1
if props.ordered :
number = str(i)
number = number + "." + " "*(2-len(number))
buffer += number + self.paragraphToString(item.childNodes[0],
indent=3)
else :
buffer += "* " + self.paragraphToString(item.childNodes[0],
indent=2)
buffer += "\n\n"
return buffer
def toString (self) :
""" Converts the document to a string. """
body = self.content.getElementsByTagName("office:body")[0]
text = self.content.getElementsByTagName("office:text")[0]
buffer = u""
paragraphs = [el for el in text.childNodes
if el.tagName in ["text:p", "text:h",
"text:list"]]
for paragraph in paragraphs :
if paragraph.tagName == "text:list" :
text = self.listToString(paragraph)
else :
text = self.paragraphToString(paragraph)
if text :
buffer += text + "\n\n"
if self.footnotes :
buffer += "--------\n\n"
for cite, body in self.footnotes :
buffer += "[^%s]: %s\n\n" % (cite, body)
return self.compressCodeBlocks(buffer)
def textToString(self, element) :
buffer = u""
for node in element.childNodes :
if node.nodeType == xml.dom.Node.TEXT_NODE :
buffer += node.nodeValue
elif node.nodeType == xml.dom.Node.ELEMENT_NODE :
tag = node.tagName
if tag == "text:span" :
text = self.textToString(node)
                    if not text.strip() :
                        buffer += text # don't apply styles to white space
                        continue
styleName = node.getAttribute("text:style-name")
                    style = self.textStyles.get(styleName, None)
                    if style and style.fixed :
                        buffer += "`" + text + "`"
                        continue
                    if style :
if style.italic and style.bold :
mark = "***"
elif style.italic :
mark = "_"
elif style.bold :
mark = "**"
else :
mark = ""
else :
mark = "<" + styleName + ">"
buffer += "%s%s%s" % (mark, text, mark)
elif tag == "text:note" :
cite = (node.getElementsByTagName("text:note-citation")[0]
.childNodes[0].nodeValue)
body = (node.getElementsByTagName("text:note-body")[0]
.childNodes[0])
self.footnotes.append((cite, self.textToString(body)))
buffer += "[^%s]" % cite
elif tag in IGNORED_TAGS :
pass
elif tag == "text:s" :
try :
num = int(node.getAttribute("text:c"))
buffer += " "*num
except :
buffer += " "
elif tag == "text:tab" :
buffer += " "
elif tag == "text:a" :
text = self.textToString(node)
link = node.getAttribute("xlink:href")
buffer += "[%s](%s)" % (text, link)
else :
buffer += " {" + tag + "} "
return buffer
def paragraphToString(self, paragraph, indent = 0) :
style_name = paragraph.getAttribute("text:style-name")
        paraProps = self.paragraphStyles.get(style_name)
text = self.textToString(paragraph)
if paraProps and not paraProps.code :
text = text.strip()
if paraProps.title :
self.hasTitle = 1
return text + "\n" + ("=" * len(text))
if paraProps.headingLevel :
level = paraProps.headingLevel
if self.hasTitle : level += 1
if level == 1 :
return text + "\n" + ("=" * len(text))
elif level == 2 :
return text + "\n" + ("-" * len(text))
else :
return "#" * level + " " + text
elif paraProps.code :
lines = [" %s" % line for line in text.split("\n")]
return "\n".join(lines)
if paraProps.indented :
return self.wrapParagraph(text, indent = indent, blockquote = True)
else :
return self.wrapParagraph(text, indent = indent)
def wrapParagraph(self, text, indent = 0, blockquote=False) :
counter = 0
buffer = ""
LIMIT = 50
if blockquote :
buffer += "> "
for token in text.split() :
if counter > LIMIT - indent :
buffer += "\n" + " "*indent
if blockquote :
buffer += "> "
counter = 0
buffer += token + " "
counter += len(token)
return buffer
if __name__ == "__main__" :
    odt = OpenDocumentTextFile(sys.argv[1])
    text = odt.toString()
    sys.stdout.write(text.encode("utf-8"))
########NEW FILE########
__FILENAME__ = models
import datetime, copy, xmlrpclib, thread, time
from django.db import models
from django.core.cache import cache
from django.contrib.sitemaps import ping_google
from django.contrib.sites.models import Site
from django.dispatch import Signal
from django.db.models import signals
from django.core.mail import mail_admins
from lifeflow.text_filters import entry_markup
class Author(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField(
help_text="Automatically built from author's name.",
)
link = models.CharField(
max_length=200,
help_text="Link to author's website.")
bio = models.TextField(
blank=True, null=True,
help_text="Bio of author, written in markdown format."
)
picture = models.FileField(
upload_to="lifeflow/author", blank=True, null=True,
help_text="Picture of author. For best visual appearance should be relatively small (200px by 200px or so)."
)
use_markdown = models.BooleanField(
default=True,
help_text="If true body is filtered using MarkDown, otherwise html is expected.",
)
class Meta:
ordering = ('id',)
def __unicode__(self):
return self.name
def get_absolute_url(self):
return u"/author/%s/" % self.slug
def latest(self, qty=10):
return self.entry_set.all().filter(**{'pub_date__lte': datetime.datetime.now()})[:qty]
def name_with_link(self):
return u'<a href="%s">%s</a>' % (self.get_absolute_url(), self.name)
class Comment(models.Model):
entry = models.ForeignKey('Entry')
parent = models.ForeignKey('Comment', blank=True, null=True)
name = models.CharField(max_length=100, blank=True, null=True)
email = models.CharField(max_length=100, blank=True, null=True)
webpage = models.CharField(max_length=100, blank=True, null=True)
body = models.TextField()
date = models.DateTimeField(auto_now_add=True)
html = models.TextField(blank=True, null=True)
class Meta:
ordering = ('-date',)
def save(self):
if self.name == u"name" or self.name == u"":
self.name = u"anonymous"
if self.webpage == u"http://webpage" or self.webpage == u"http://":
# better to check for valid URL
self.webpage = None
if self.email == u"email":
# better to check for valid email address
self.email = None
title = self.entry.title
subject = u"[Comment] %s on %s" % (self.name, self.entry.title)
body = u"Comment by %s [%s][%s] on %s\n\n%s" % (self.name, self.email, self.webpage, title, self.html)
mail_admins(subject, body, fail_silently=True)
super(Comment,self).save()
def get_absolute_url(self):
return u"%s#comment_%s" % (self.entry.get_absolute_url(), self.pk)
def __unicode__(self):
name = self.name or "Unnamed Poster"
title = self.entry.title or "Unnamed Entry"
return u": ".join((name, title))
class Draft(models.Model):
title = models.CharField(max_length=200, blank=True, null=True)
slug = models.SlugField(unique_for_date='pub_date',
blank=True, null=True)
summary = models.TextField(blank=True, null=True)
body = models.TextField(blank=True, null=True)
pub_date = models.DateTimeField(blank=True, null=True)
edited = models.BooleanField(default=False)
use_markdown = models.BooleanField(default=True)
is_translation = models.BooleanField(default=False)
send_ping = models.BooleanField(default=False)
allow_comments = models.BooleanField(default=True)
flows = models.ManyToManyField('Flow', blank=True, null=True)
tags = models.ManyToManyField('Tag', blank=True, null=True)
series = models.ManyToManyField('Series', blank=True, null=True)
authors = models.ManyToManyField('Author', blank=True, null=True)
def __unicode__(self):
if self.title:
return self.title
else:
return "Untitled Draft"
class CurrentEntryManager(models.Manager):
def get_query_set(self):
return super(CurrentEntryManager, self).get_query_set().filter(**{'pub_date__lte': datetime.datetime.now()}).filter(**{'is_translation':False})
class Entry(models.Model):
title = models.CharField(
max_length=200,
help_text='Name of this entry.'
)
slug = models.SlugField(
unique_for_date='pub_date',
help_text='Automatically built from the title.'
)
summary = models.TextField(help_text="One paragraph. Don't add <p> tag.")
body = models.TextField(
help_text='Use <a href="http://daringfireball.net/projects/markdown/syntax">Markdown-syntax</a>'
)
body_html = models.TextField(blank=True, null=True)
pub_date = models.DateTimeField(
help_text='If the date and time combination is in the future, the entry will not be visible until after that moment has passed.'
)
use_markdown = models.BooleanField(
default=True,
help_text="If true body is filtered using MarkDown++, otherwise no filtering is applied.",
)
is_translation = models.BooleanField(
default=False,
help_text="Only used to add articles to the translation feed.",
)
send_ping = models.BooleanField(
default=False,
help_text="If true will ping Google and any sites you have specified on saves."
)
allow_comments = models.BooleanField(
default=True,
help_text="If true users may add comments on this entry.",
)
flows = models.ManyToManyField(
'Flow', blank=True, null=True,
help_text="Determine which pages and feeds to show entry on.",
)
tags = models.ManyToManyField(
'Tag', blank=True, null=True,
help_text="Select tags to associate with this entry.",
)
series = models.ManyToManyField(
'Series', blank=True, null=True,
        help_text='Used to associate groups of entries together under one theme.',
)
resources = models.ManyToManyField(
'Resource', blank=True, null=True,
help_text='Files or images used in entries. MarkDown links are automatically generated.',
)
authors = models.ManyToManyField(
'Author', blank=True, null=True,
help_text='The authors associated with this entry.',
)
# main manager, allows access to all entries, required primarily for admin functionality
objects = models.Manager()
# current manager, does not allow access entries published to future dates
current = CurrentEntryManager()
class Meta:
ordering = ('-pub_date',)
get_latest_by = 'pub_date'
verbose_name_plural = "entries"
def __unicode__(self):
return self.title
def get_absolute_url(self):
return u"/entry/%s/%s/" % (
self.pub_date.strftime("%Y/%b/%d").lower(),
self.slug,
)
def save(self):
if self.use_markdown:
self.body_html = entry_markup(self.body, self)
else:
self.body_html = self.body
if self.send_ping is True: self.ping()
super(Entry,self).save()
def ping(self):
# ping all sites to ping (Ping-O-Matic, etc)
for site in SiteToNotify.objects.all():
site.ping()
# inform Google sitemap has changed
try:
ping_google()
except Exception:
pass
def get_next_article(self):
next = Entry.current.filter(**{'pub_date__gt': self.pub_date}).order_by('pub_date')
try:
return next[0]
except IndexError:
return None
def get_previous_article(self):
previous = Entry.current.filter(**{'pub_date__lt': self.pub_date}).order_by('-pub_date')
try:
return previous[0]
except IndexError:
return None
def get_random_entries(self):
return Entry.current.order_by('?')[:3]
def get_recent_comments(self, qty=3):
return Comment.objects.all().filter(entry=self)[:qty]
def organize_comments(self):
"""
Used to create a list of threaded comments.
This is a bit tricky since we only know the parent for
each comment, as opposed to knowing each parent's children.
"""
def build_relations(dict, comment=None, depth=-1):
if comment is None: id = None
else: id = comment.id
try:
children = dict[id]
children.reverse()
return [(comment, depth), [build_relations(dict, x, depth+1) for x in children]]
except:
return (comment, depth)
def flatten(l, ltypes=(list, tuple)):
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
if not len(l):
break
else:
l[i:i+1] = list(l[i])
i += 1
return l
def group(seq, length):
"""
Taken from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496784
"""
return [seq[i:i+length] for i in range(0, len(seq), length)]
dict = {None:[]}
all = Comment.objects.select_related().filter(entry=self)
for comment in all:
if comment.parent: id = comment.parent.id
else: id = None
try:
dict[id].append(comment)
except KeyError:
dict[id] = [comment]
relations = build_relations(dict)
# If there are no comments, return None
        if not relations[1]:
return None
# Otherwise, throw away the None node, flatten
# the returned list, and regroup the list into
# 2-lists that look like
# [CommentInstance, 4]
# where CommentInstance is an instance of the
# Comment class, and 4 is the depth of the
# comment in the layering
else:
return group(flatten(relations[1]), 2)
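        # Illustrative sketch (assumed data, not recorded output): with
        # top-level comments c1 and c2 plus c3 a reply to c1, the parent
        # dict maps None -> [c1, c2] and c1.id -> [c3]; build_relations
        # nests that into [(None, -1), [...]], and the flatten/group pass
        # yields (comment, depth) pairs such as [[c2, 0], [c1, 0], [c3, 1]]
        # that a template can indent directly.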
class Flow(models.Model):
"""
A genre of entries. Like things about Cooking, or Japan.
Broader than a tag, and gets its own nav link and is available
at /slug/ instead of /tags/slug/
"""
title = models.CharField(max_length=100)
slug = models.SlugField()
def __unicode__(self):
return self.title
def latest(self, qty=None):
if qty is None:
return self.entry_set.all().filter(**{'pub_date__lte': datetime.datetime.now()}).filter(**{'is_translation':False})
else:
return self.entry_set.all().filter(**{'pub_date__lte': datetime.datetime.now()}).filter(**{'is_translation':False})[:qty]
def get_absolute_url(self):
return u"/%s/" % self.slug
class Language(models.Model):
title = models.CharField(max_length=50)
slug = models.SlugField()
def __unicode__(self):
return self.title
def get_absolute_url(self):
return u"/language/%s/" % self.slug
def latest(self, qty=None):
translations = self.translation_set.all().filter(**{'translated__pub_date__lte': datetime.datetime.now()})
return [ x.translated for x in translations ]
class Project(models.Model):
"""
A project of any kind. Think of it as a piece in a portfolio.
"""
title = models.CharField(max_length=50)
slug = models.SlugField(
help_text='Automatically built from the title.'
)
summary = models.TextField(help_text="One paragraph. Don't add <p> tag.")
body = models.TextField(
help_text='Use <a href="http://daringfireball.net/projects/markdown/syntax">Markdown-syntax</a>')
body_html = models.TextField(blank=True, null=True)
use_markdown = models.BooleanField(default=True)
language = models.CharField(
max_length=50,
help_text="The programming language the project is written in.",
)
license = models.CharField(
max_length=50,
help_text="The license under which the project is released.",
)
resources = models.ManyToManyField('Resource', blank=True, null=True)
SIZE_CHOICES = (
('0', 'Script'),
('1', 'Small'),
('2', 'Medium'),
('3', 'Large'),
)
size = models.CharField(
max_length=1, choices=SIZE_CHOICES,
help_text="Used for deciding order projects will be displayed in.",
)
class Meta:
ordering = ('-size',)
def __unicode__(self):
return self.title
def size_string(self):
if self.size == str(0): return "Script"
if self.size == str(1): return "Small"
elif self.size == str(2): return "Medium"
elif self.size == str(3): return "Large"
def get_absolute_url(self):
return u"/projects/%s/" % self.slug
def save(self):
if self.use_markdown:
self.body_html = entry_markup(self.body, self)
else:
self.body_html = self.body
super(Project,self).save()
class Resource(models.Model):
"""
A wrapper for files (image or otherwise, the model is unaware of the
distinction) that are used in blog entries.
"""
title = models.CharField(max_length=50)
markdown_id = models.CharField(max_length=50)
content = models.FileField(upload_to="lifeflow/resource")
def get_relative_url(self):
# figure out why I named this relative instead of absolute
# because... it sure as hell isn't relative
return self.content.url
def __unicode__(self):
return u"[%s] %s" % (self.markdown_id, self.title,)
class RecommendedSite(models.Model):
"""
A site that is displayed under the 'Blogs-To-See' entry
on each page of the website. Akin to entries in a blog roll
on a WordPress blog.
"""
title = models.CharField(max_length=50)
url = models.URLField()
def __unicode__(self):
return u"%s ==> %s" % (self.title, self.url)
class Series(models.Model):
"""
A series is a collection of Entry instances on the same theme.
"""
title = models.CharField(max_length=200)
    slug = models.SlugField()
class Meta:
ordering = ('-id',)
verbose_name_plural = "Series"
def __unicode__(self):
return self.title
def get_absolute_url(self):
return u"/articles/%s/" % ( unicode(self.slug), )
def latest(self, qty=10):
return self.entry_set.all().filter(**{'pub_date__lte': datetime.datetime.now()})[:qty]
def in_order(self):
return self.entry_set.filter(**{'pub_date__lte': datetime.datetime.now()}).order_by('id')
def num_articles(self):
return self.entry_set.all().count()
class SiteToNotify(models.Model):
"""
SiteToNotify instances are pinged by Entries where
    someEntry.send_ping is True.
Sites such as 'Ping-O-Matic' are easiest to use here.
Manually creating the Ping-O-Matic instance looks
something like this:
stn = SiteToNotify(title="Ping-O-Matic",
url_to_ping="http://rpc.pingomatic.com/",
blog_title="My Blog's Title",
blog_url="http://www.myblog.com")
stn.save()
"""
title = models.CharField(max_length=100)
url_to_ping = models.CharField(max_length=200)
blog_title = models.CharField(max_length=100)
blog_url = models.CharField(max_length=200)
class Meta:
verbose_name_plural = "Sites to Notify"
def __unicode__(self):
return self.title
def ping(self):
def do_ping():
remote_server = xmlrpclib.Server(self.url_to_ping)
remote_server.weblogUpdates.ping(self.blog_title, self.blog_url)
thread.start_new_thread(do_ping, ())
class Tag(models.Model):
"Tags are associated with Entry instances to describe their contents."
title = models.CharField(max_length=50)
slug = models.SlugField()
def __unicode__(self):
return self.title
def get_absolute_url(self):
return u"/tags/%s/" % self.slug
def random(self):
return self.entry_set.filter(**{'pub_date__lte': datetime.datetime.now()}).order_by('?')
def latest(self, qty=None):
if qty is None:
return self.entry_set.all().filter(**{'pub_date__lte': datetime.datetime.now()})
else:
return self.entry_set.all().filter(**{'pub_date__lte': datetime.datetime.now()})[:qty]
def get_max_tags(self):
max = cache.get('lifeflow_tags_max')
        if max is None:
tags = Tag.objects.all()
max = 0
for tag in tags:
count = tag.entry_set.count()
if count > max: max = count
cache.set('lifeflow_tags_max', max)
return max
def tag_size(self):
max = self.get_max_tags()
count = self.entry_set.count()
ratio = (count * 1.0) / max
tag_name = "size"
if ratio < .2: return tag_name + "1"
elif ratio < .4: return tag_name + "2"
elif ratio < .6: return tag_name + "3"
elif ratio < .8: return tag_name + "4"
else: return tag_name + "5"
class Translation(models.Model):
"""
Link together two entries, where @translated is a translation of
@original in the language @language.
"""
language = models.ForeignKey('Language')
original = models.ForeignKey('Entry')
translated = models.ForeignKey('Entry', related_name="translated")
def __unicode__(self):
return u"Translation of %s into %s" % (self.original, self.language,)
def get_link(self):
url = self.translated.get_absolute_url()
return u'<a href="%s">%s</a>' % (url, self.language,)
def get_absolute_url(self):
return self.translated.get_absolute_url()
def resave_object(sender, instance, signal, *args, **kwargs):
"""
This is called to save objects a second time after required
    many-to-many relationships have been established.
There must be a better way of handling this.
"""
def do_save():
time.sleep(3)
try:
instance.save()
except:
pass
id = u"%s%s" % (unicode(instance), unicode(instance.id))
try:
should_resave = resave_hist[id]
except KeyError:
resave_hist[id] = True
should_resave = True
if should_resave is True:
resave_hist[id] = False
thread.start_new_thread(do_save, ())
else:
resave_hist[id] = True
resave_hist = {}
signals.post_save.connect(resave_object, sender=Project)
signals.post_save.connect(resave_object, sender=Entry)
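# How the guard above works (a reading of the code, for clarity): the first
# post_save schedules a delayed second save; that second save fires the
# signal again, but resave_hist has been toggled to False by then, so it
# only resets the flag instead of scheduling a third save.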
########NEW FILE########
__FILENAME__ = search
import solango
from lifeflow.models import Comment, Entry
class EntryDocument(solango.SearchDocument):
date = solango.fields.DateField()
summary = solango.fields.TextField(copy=True)
title = solango.fields.CharField(copy=True)
tags = solango.fields.CharField(copy=True)
content = solango.fields.TextField(copy=True)
def transform_summary(self, instance):
return instance.summary
def transform_tags(self, instance):
tags = list(instance.tags.all())
texts = [ tag.title for tag in tags ]
return ",".join(texts)
def transform_date(self, instance):
return instance.pub_date
def transform_content(self, instance):
return instance.body
solango.register(Entry, EntryDocument)
########NEW FILE########
__FILENAME__ = sitemaps
from django.contrib.sitemaps import Sitemap
from lifeflow.models import Project, Entry
class ProjectSitemap(Sitemap):
changefreq = 'monthly'
priority = 0.9
def items(self):
return Project.objects.all()
########NEW FILE########
__FILENAME__ = lifeflow
from django import template
register = template.Library()
def boundary(value, arg):
"""Defines a boundary for an integer. If the value of the integer
is higher than the boundary, then the boundary is returned instead.
    Example: {{ comment.depth|boundary:"4" }} will return 4 if the value of
comment.depth is 4 or higher, but will return 1, 2 or 3 if the
value of comment.depth is 1, 2 or 3 respectively.
"""
value = int(value)
boundary = int(arg)
if value > boundary:
return boundary
else:
return value
register.filter('boundary', boundary)
def nearby(lst, obj, count=5):
lst = list(lst)
l = len(lst)
try:
pos = lst.index(obj)
except ValueError:
pos = 0
dist = count / 2
if pos <= dist:
return lst[:count]
if pos >= l - dist:
return lst[l-count:]
else:
return lst[pos-dist:pos+dist+1]
register.filter('nearby', nearby)
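# Illustrative template use (names assumed, not from the source):
# {{ entries|nearby:current }} keeps a window of five items from the
# list, centred on the current object when possible.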
def human(lst, field):
lst = list(lst)
lst.sort(lambda a,b : cmp(getattr(a,field).lower(),
getattr(b,field).lower()))
return lst
register.filter('human', human)
########NEW FILE########
__FILENAME__ = tests
import unittest
from django.test.client import Client
from lifeflow.models import *
import datetime
import pygments.lexers as lexers
#response = self.client.get('/api/case/retrieve/', {})
#self.assertEquals(response.content, 'etc')
class commentTest(unittest.TestCase):
def setUp(self):
self.client = Client()
def test_organize_comments(self):
"models.py: test organize_comments method for Entry"
e = Entry(title="My Entry",
pub_date=datetime.datetime.now(),
summary="A summary",
body="Some text")
e.save()
c1 = Comment(entry=e, body="Some comment one.")
c1.save()
self.assertEquals([[c1, 0]], e.organize_comments())
c2 = Comment(entry=e, name="Two", body="Some comment two.")
c2.save()
self.assertEquals([[c2,0],[c1,0]], e.organize_comments())
c3 = Comment(entry=e, name="Three", parent=c1, body="Three")
c3.save()
self.assertEquals([[c2, 0], [c1,0], [c3,1]],
e.organize_comments())
c4 = Comment(entry=e, name="Four", parent=c2, body="Four")
c4.save()
self.assertEquals([[c2,0], [c4, 1], [c1,0], [c3,1]],
e.organize_comments())
class codeMarkupTest(unittest.TestCase):
def test_markup(self):
"markup/markdown.py: test markdown"
txt = "this is some text"
expected = u"<p>this is some text\n</p>"
rendered = dbc_markup(txt).strip("\n")
self.assertEqual(expected, rendered)
def test_code_markup(self):
"markup/code.py: test code markup"
txt = u" some code in a code block\n is nice\n"
expected = u'<pre><code>some code in a code block\nis nice\n</code></pre>'
self.assertEqual(expected, dbc_markup(txt))
txt = u"<pre>this is some stuff\nthat I am concerned about</pre>"
self.assertEqual(txt, dbc_markup(txt))
txt = u"@@ python\nx = 10 * 5\n@@\n"
expected = u'<div class="highlight"><pre><span class="n">x</span> <span class="o">=</span> <span class="mi">10</span> <span class="o">*</span> <span class="mi">5</span>\n</pre></div>'
self.assertEqual(expected, dbc_markup(txt))
txt = u"@@ python\ndef test(a,b):\n return x + y\n@@\n"
expected = u'<div class="highlight"><pre><span class="k">def</span> <span class="nf">test</span><span class="p">(</span><span class="n">a</span><span class="p">,</span><span class="n">b</span><span class="p">):</span>\n <span class="k">return</span> <span class="n">x</span> <span class="o">+</span> <span class="n">y</span>\n</pre></div>'
self.assertEqual(expected, dbc_markup(txt))
def test_using_non_existant_language(self):
"markup/code.py: test improperly formed code markup"
cases = (
u"@@\ndef test(a,b):\n@@\n",
u"@@ fake-language\n(+ 1 2 3)\n@@\n",
)
for case in cases:
self.assertRaises(lexers.ClassNotFound,
lambda : dbc_markup(case))
def test_lfmu(self):
"markup/lifeflowmarkdown.py: test lifeflow markup"
e = Entry(title="My Entry",
pub_date=datetime.datetime.now(),
summary="A summary",
body="Some text")
e.save()
a = Author(name="Will Larson",
slug="will-larson",
link="a")
a.save()
e2= Entry(title="My Entry",
pub_date=datetime.datetime.now(),
summary="A summary",
body="Some text",
)
e2.save()
e2.authors.add(a)
e2.save()
t = Tag(title="LifeFlow", slug="lifeflow")
t.save()
c1 = Comment(entry=e, body="Some comment one.")
c1.save()
p = Project(title="Lifeflow",
slug="lifeflow",
summary="A summary",
body="Some text")
p.save()
self.assertEqual(dbc_markup("[trying out a tag][tag lifeflow]", e),
u'<p><a href="/tags/lifeflow/">trying out a tag</a>\n</p>')
self.assertEqual(dbc_markup("[and the author][author]", e),
u'<p><a href="/author/">and the author</a>\n</p>')
self.assertEqual(dbc_markup("[about will][author]", e2),
u'<p><a href="/author/will-larson/">about will</a>\n</p>')
#self.assertEqual(dbc_markup("[the first comment][comment 1]", e),
# u'<p><a href="/entry/2008/jan/12//#comment_1">the first comment</a>\n</p>')
self.assertEqual(dbc_markup("[lf proj][project lifeflow]", e),
u'<p><a href="/projects/lifeflow/">lf proj</a>\n</p>')
# test for [file name]
# test for [f name]
########NEW FILE########
__FILENAME__ = text_filters
"""
This file contains filters which are used for pre and post
processing various kinds of text within LifeFlow.
Which values are applied is controlled by a number of global
variables within the project's settings.py file. These vars
are:
LIFEFLOW_ENTRY_FILTERS
LIFEFLOW_COMMENT_FILTERS
If you wish to add your own filters, you don't
have to add them to this file; they can exist anywhere.
Simply import them into the settings.py file and add them
to the appropriate global variable.
The API for these processing functions is very simple:
they accept two parameters, a string to process,
and optionally a related model.
"""
from django.conf import settings
from lifeflow.markdown.markdown import Markdown
from lifeflow.markdown import mdx_lifeflow
from lifeflow.markdown import mdx_code
from lifeflow.markdown import mdx_footnotes
from lifeflow.markdown import mdx_foreign_formats
def convert_string(str):
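    # LOCALS, bound at the bottom of this module, maps a filter named as a
    # string in settings to the callable of that name defined here.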
if LOCALS.has_key(str):
return LOCALS[str]
else:
return str
def comment_markup(txt,obj=None):
    filters = getattr(settings,'LIFEFLOW_COMMENT_FILTERS', DEFAULT_COMMENT_FILTERS)
    filters = [convert_string(filter) for filter in filters]
    for filter in filters:
        txt = filter(txt, obj)
    return txt
def entry_markup(txt,obj=None):
    filters = getattr(settings,'LIFEFLOW_ENTRY_FILTERS', DEFAULT_ENTRY_FILTERS)
    # resolve string names the same way comment filters are resolved
    filters = [convert_string(filter) for filter in filters]
    for filter in filters:
        # pass the related model through, per the two-parameter API above
        txt = filter(txt, obj)
    return txt
def comment_markdown(txt,obj=None):
exts = (mdx_code,)
md = Markdown(txt,extensions=exts,safe_mode="escape")
return md.convert()
def entry_markdown(txt,obj=None):
exts = (mdx_code, mdx_footnotes,mdx_foreign_formats, mdx_lifeflow)
md = Markdown(txt,extensions=exts,extension_configs={'lifeflow':obj})
return md.convert()
LOCALS = locals()
DEFAULT_COMMENT_FILTERS = (comment_markdown,)
DEFAULT_ENTRY_FILTERS = (entry_markdown,)
########NEW FILE########
__FILENAME__ = urls
from django.conf.urls.defaults import *
from lifeflow.feeds import *
from lifeflow.models import *
from lifeflow.sitemaps import ProjectSitemap
from django.contrib.sitemaps import GenericSitemap
from django.views.decorators.cache import cache_page
from django.contrib.syndication.views import feed
# Cache
def cache(view):
    return cache_page(view, 60*30)
handler500 = 'lifeflow.views.server_error'
flows = Flow.objects.all()
projects = Project.objects.all()
tags = Tag.objects.all()
languages = Language.objects.all()
authors = Author.objects.all()
feeds = {
'author': AuthorFeed,
'all' : AllFeed,
'flow' : FlowFeed,
'tag' : TagFeed,
'series' : SeriesFeed,
'translations' : TranslationFeed,
'projects' : ProjectFeed,
'comment' : CommentFeed,
'entry_comment' : EntryCommentFeed,
'language' : LanguageFeed,
}
all_dict = {
'queryset' : Entry.objects.all(),
'date_field' : 'pub_date',
}
sitemaps = {
'projects' : ProjectSitemap,
'entries' : GenericSitemap(all_dict, priority=0.6),
}
urlpatterns = patterns(
'',
url(r'^$', 'lifeflow.views.front'),
url(r'^sitemap.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
# comments
url(r'^comments/create/$', 'lifeflow.views.comments'),
url(r'^comments/create/(?P<entry_id>\d+)/$', 'lifeflow.views.comments'),
url(r'^comments/create/(?P<entry_id>\d+)/(?P<parent_id>\d+)/$', 'lifeflow.views.comments'),
# feeds and rss views
url(r'^feeds/(?P<url>.*)/$', cache(feed), {'feed_dict': feeds}),
url(r'^meta/rss/$', 'lifeflow.views.rss'),
# date based generic views
url(r'^entry/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/(?P<slug>[-\w]+)/$', 'django.views.generic.date_based.object_detail', dict(all_dict, slug_field='slug')),
url(r'^entry/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/$', 'django.views.generic.date_based.archive_day', all_dict),
url(r'^entry/(?P<year>\d{4})/(?P<month>[a-z]{3})/$', 'django.views.generic.date_based.archive_month', all_dict),
url(r'^entry/(?P<year>\d{4})/$', 'django.views.generic.date_based.archive_year', all_dict),
url(r'^entry/$', 'django.views.generic.date_based.archive_index', all_dict),
# tag generic views
url(r'^tags/$', 'django.views.generic.list_detail.object_list', dict(queryset=tags)),
url(r'^tags/(?P<slug>[-\w]+)/$', 'django.views.generic.list_detail.object_detail', dict(queryset=tags, slug_field='slug')),
# language generic views
url(r'^language/$', 'django.views.generic.list_detail.object_list', dict(queryset=languages)),
url(r'^language/(?P<slug>[-\w]+)/$', 'django.views.generic.list_detail.object_detail', dict(queryset=languages, slug_field='slug')),
# author generic views
url(r'^author/(?P<slug>[-\w]+)/$', 'django.views.generic.list_detail.object_detail', dict(queryset=authors, slug_field='slug')),
url(r'^author/$', 'django.views.generic.list_detail.object_list', dict(queryset=authors)),
# articles views (custom view)
url(r'^articles/$', 'lifeflow.views.articles'),
# projects views
url(r'^projects/$', 'django.views.generic.list_detail.object_list', dict(queryset=projects)),
url(r'^projects/(?P<slug>[-\w]+)/$', 'django.views.generic.list_detail.object_detail', dict(queryset=projects, slug_field='slug')),
# editor
url(r'^editor/', include('lifeflow.editor.urls')),
# flows
url(r'^(?P<slug>[-\w]+)/$', 'lifeflow.views.flow'),
)
########NEW FILE########
__FILENAME__ = views
"""
Views.py
Author: Will Larson
Contact: [email protected]
Contains one custom view for displaying articles.
Mostly necessary to presort the articles in order
of descending size.
"""
import datetime, time, random, cgi, md5
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponseRedirect
from django.conf import settings
from django.core.paginator import QuerySetPaginator
from lifeflow.models import Series, Flow, Entry, Comment
from lifeflow.forms import CommentForm
from django.core.cache import cache
from django.http import HttpRequest
from django.utils.cache import get_cache_key
def expire_page(path):
'http://www.djangosnippets.org/snippets/936/'
request = HttpRequest()
request.path = path
key = get_cache_key(request)
if cache.has_key(key):
cache.delete(key)
def server_error(request):
return render_to_response('500.html',{},RequestContext(request,{}))
def articles(request):
object_list = Series.objects.all()
return render_to_response('lifeflow/articles.html', {'object_list' : object_list},RequestContext(request, {}))
def comments(request, entry_id=None, parent_id=None):
def make_identifier(id, time):
secret = getattr(settings, 'SECRET_KEY')
time = time[:-4]
data = "%s%s%s%s" % ("lifeflow", id, time, secret)
return md5.md5(data).hexdigest()
# if an entry ID has been posted, use that
if request.POST.has_key('entry_id'):
id = int(request.POST['entry_id'])
# otherwise use the parameter
elif entry_id is None:
return render_to_response('lifeflow/invalid_comment.html',{},RequestContext(request, {}))
else:
id = int(entry_id)
# TODO: validate ID, throw 500 otherwise
entry = Entry.objects.get(pk=id)
if request.POST.has_key('parent_id') and request.POST['parent_id'] != u"":
parent_id = int(request.POST['parent_id'])
parent = Comment.objects.get(pk=parent_id)
elif parent_id is None:
parent = None
else:
parent_id = int(parent_id)
parent = Comment.objects.get(pk=parent_id)
    # part of the anti-spam implementation: if no identifier was posted
    # yet, mint a fresh one for this entry
    if not request.POST.has_key('identifier'):
        now = unicode(time.time()).split('.')[0]
        identifier = make_identifier(id, now)
    # otherwise reuse the identifier and timestamp from the posted form
    else:
        identifier = request.POST['identifier']
        now = request.POST['time']
form = CommentForm(request.POST)
form.is_valid()
# Initial submission from entry_detail.html
if request.POST.has_key('submit'):
for i in xrange(5,8):
name = u"honey%s" % i
value = request.POST[name]
if value != u"":
raise Http404
if time.time() - int(now) > 3600:
raise Http404
if identifier != make_identifier(id, now):
raise Http404
name = form.cleaned_data['name']
email = form.cleaned_data['email']
webpage = form.cleaned_data['webpage']
html = form.cleaned_data['html']
body = form.cleaned_data['body']
c = Comment(entry=entry,parent=parent,name=name,email=email,
webpage=webpage,body=body,html=html)
c.save()
url = u"%s#comment_%s" % (entry.get_absolute_url(), c.pk)
expire_page(entry.get_absolute_url())
return HttpResponseRedirect(url)
return render_to_response(
'lifeflow/comment.html',
{'object':entry,'parent':parent,'identifier':identifier,'time':now,'form':form},
RequestContext(request, {}))
def flow(request, slug):
try:
flow = Flow.objects.get(slug=slug)
except Flow.DoesNotExist:
raise Http404
try:
page = int(request.GET["page"])
except:
page = 1
page = QuerySetPaginator(Flow.objects.get(slug=slug).latest(), 5).page(page)
return render_to_response('lifeflow/flow_detail.html',
{'object' : flow, 'page' : page,},
RequestContext(request, {}))
def front(request):
try:
page = int(request.GET["page"])
except:
page = 1
page = QuerySetPaginator(Entry.current.all(), 5).page(page)
return render_to_response('lifeflow/front.html', {'page':page}, RequestContext(request, {}))
def rss(request):
flows = Flow.objects.all()
return render_to_response('lifeflow/meta_rss.html', {'flows' : flows }, RequestContext(request, {}))
########NEW FILE########
| [
"[email protected]"
] | |
12acb8fffc162e13711169757c4c0ec9dd2a2c38 | 72ffd01f575aa38ae1f1b5d2a3c802dc684fdb33 | /convlab2/nlg/sclstm/crosswoz/sc_lstm.py | 4575ce7ab12acae902b85d4d9570a6a6071c3a8b | [
"Apache-2.0"
] | permissive | ArthurRizar/CrossWOZ | f721ea0459d6c3afad8aefb38f710139b8827f28 | 185d78b415f33bb6ad01314e6573d4bfef320aae | refs/heads/master | 2021-03-07T06:20:50.066345 | 2020-03-09T12:29:50 | 2020-03-09T12:29:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,209 | py | import configparser
import os
import zipfile
from copy import deepcopy
from collections import defaultdict
from pprint import pprint
import torch
import re
from convlab2.util.file_util import cached_path
from convlab2.nlg.sclstm.multiwoz.loader.dataset_woz import SimpleDatasetWoz
from convlab2.nlg.sclstm.model.lm_deep import LMDeep
from convlab2.nlg.nlg import NLG
DEFAULT_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models")
DEFAULT_ARCHIVE_FILE = os.path.join(DEFAULT_DIRECTORY, "nlg-sclstm-multiwoz.zip")
def parse(is_user):
if is_user:
args = {
'model_path': 'sclstm_usr.pt',
'n_layer': 1,
'beam_size': 10
}
else:
args = {
'model_path': 'sclstm.pt',
'n_layer': 1,
'beam_size': 10
}
config = configparser.ConfigParser()
if is_user:
config.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config/config_usr.cfg'))
else:
config.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config/config.cfg'))
config.set('DATA', 'dir', os.path.dirname(os.path.abspath(__file__)))
return args, config
class SCLSTM(NLG):
def __init__(self,
archive_file=DEFAULT_ARCHIVE_FILE,
use_cuda=False,
is_user=False,
model_file='https://tatk-data.s3-ap-northeast-1.amazonaws.com/nlg_sclstm_multiwoz.zip'):
if not os.path.isfile(archive_file):
if not model_file:
raise Exception("No model for SC-LSTM is specified!")
archive_file = cached_path(model_file)
model_dir = os.path.dirname(os.path.abspath(__file__))
if not os.path.exists(os.path.join(model_dir, 'resource')):
archive = zipfile.ZipFile(archive_file, 'r')
archive.extractall(model_dir)
self.USE_CUDA = use_cuda
self.args, self.config = parse(is_user)
self.dataset = SimpleDatasetWoz(self.config)
# get model hyper-parameters
hidden_size = self.config.getint('MODEL', 'hidden_size')
# get feat size
d_size = self.dataset.do_size + self.dataset.da_size + self.dataset.sv_size # len of 1-hot feat
vocab_size = len(self.dataset.word2index)
self.model = LMDeep('sclstm', vocab_size, vocab_size, hidden_size, d_size, n_layer=self.args['n_layer'], use_cuda=use_cuda)
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.args['model_path'])
# print(model_path)
assert os.path.isfile(model_path)
        self.model.load_state_dict(torch.load(model_path, map_location=None if use_cuda else 'cpu'))
self.model.eval()
for name, param in self.model.named_parameters():
print(name, param.shape, param.device, param.requires_grad)
if use_cuda:
self.model.cuda()
def generate_delex(self, meta):
"""
meta = [
[
"General",
"greet",
"none",
"none"
],
[
"Request",
"景点",
"名称",
""
],
[
"Inform",
"景点",
"门票",
"免费"
]
]
"""
intent_list = []
intent_frequency = defaultdict(int)
feat_dict = dict()
for act in meta:
cur_act = deepcopy(act)
# intent list
facility = None # for 酒店设施
if '酒店设施' in cur_act[2]:
facility = cur_act[2].split('-')[1]
if cur_act[0] == 'Inform':
cur_act[2] = cur_act[2].split('-')[0] + '+' + cur_act[3]
elif cur_act[0] == 'Request':
cur_act[2] = cur_act[2].split('-')[0]
if cur_act[0] == 'Select':
cur_act[2] = '源领域+' + cur_act[3]
intent = '+'.join(cur_act[:-1])
if '+'.join(cur_act) == 'Inform+景点+门票+免费' or str(cur_act[-1]) == '无':
intent = '+'.join(cur_act)
intent_list.append(intent)
intent_frequency[intent] += 1
# content replacement
value = 'none'
freq = 'none'
if (act[0] in ['Inform', 'Recommend'] or '酒店设施' in intent) and not intent.endswith('无'):
if '酒店设施' in intent:
value = facility
else:
value = act[3]
freq = str(intent_frequency[intent])
elif act[0] == 'Request':
freq = '?'
value = '?'
elif act[0] == 'Select':
value = act[3]
# generate the formation in feat.json
new_act = intent.split('+')
if new_act[0] == 'General':
feat_key = new_act[0] + '-' + new_act[1]
else:
feat_key = new_act[1] + '-' + new_act[0]
            if new_act[2] == '酒店设施' and new_act[0] == 'Inform':
                try:
                    feat_value = [new_act[2] + '+' + new_act[3], freq, value]
                except IndexError:
                    # malformed act: log it and re-raise rather than fall
                    # through with feat_value unbound
                    print(new_act)
                    raise
elif intent.endswith('无'):
feat_value = [new_act[2] + '+无', freq, value]
elif intent.endswith('免费'):
feat_value = [new_act[2] + '+免费', freq, value]
else:
feat_value = [new_act[2], freq, value]
feat_dict[feat_key] = feat_dict.get(feat_key, [])
feat_dict[feat_key].append(feat_value)
meta = deepcopy(feat_dict)
# remove invalid dialog act
meta_ = deepcopy(meta)
for k, v in meta.items():
for triple in v:
voc = 'd-a-s-v:' + k + '-' + triple[0] + '-' + triple[1]
if voc not in self.dataset.cardinality:
meta_[k].remove(triple)
if not meta_[k]:
del (meta_[k])
meta = meta_
# mapping the inputs
do_idx, da_idx, sv_idx, featStr = self.dataset.getFeatIdx(meta)
do_cond = [1 if i in do_idx else 0 for i in range(self.dataset.do_size)] # domain condition
da_cond = [1 if i in da_idx else 0 for i in range(self.dataset.da_size)] # dial act condition
sv_cond = [1 if i in sv_idx else 0 for i in range(self.dataset.sv_size)] # slot/value condition
feats = [do_cond + da_cond + sv_cond]
feats_var = torch.FloatTensor(feats)
if self.USE_CUDA:
feats_var = feats_var.cuda()
decoded_words = self.model.generate(self.dataset, feats_var, self.args['beam_size'])
delex = decoded_words[0] # (beam_size)
return delex
def generate_slots(self, meta):
meta = deepcopy(meta)
delex = self.generate_delex(meta)
# get all informable or requestable slots
slots = []
for sen in delex:
slot = []
counter = {}
words = sen.split()
for word in words:
if word.startswith('slot-'):
placeholder = word[5:]
if placeholder not in counter:
counter[placeholder] = 1
else:
counter[placeholder] += 1
slot.append(placeholder+'-'+str(counter[placeholder]))
slots.append(slot)
# for i in range(self.args.beam_size):
# print(i, slots[i])
return slots[0]
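        # Illustrative output (assumed delexicalised sentence, not from the
        # source): 'slot-名称 位于 slot-地址' -> ['名称-1', '地址-1'], with
        # the counter suffix distinguishing repeated placeholders.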
def _value_replace(self, sentences, dialog_act):
ori_sen = deepcopy(sentences)
dialog_act = deepcopy(dialog_act)
intent_frequency = defaultdict(int)
for act in dialog_act:
intent = self._prepare_intent_string(deepcopy(act))
intent_frequency[intent] += 1
if intent_frequency[intent] > 1: # if multiple same intents...
intent += str(intent_frequency[intent])
if '酒店设施' in intent:
try:
sentences = sentences.replace('[' + intent + ']', act[2].split('-')[1])
sentences = sentences.replace('[' + intent + '1]', act[2].split('-')[1])
except Exception as e:
print('Act causing problem in replacement:')
pprint(act)
raise e
if act[0] == 'Inform' and act[3] == "无":
sentences = sentences.replace('[主体]', act[1])
sentences = sentences.replace('[属性]', act[2])
sentences = sentences.replace('[' + intent + ']', act[3])
sentences = sentences.replace('[' + intent + '1]', act[3]) # if multiple same intents and this is 1st
# if '[' in sentences and ']' in sentences:
# print('\n\nValue replacement not completed!!! Current sentence: %s' % sentences)
# print('Current DA:')
# pprint(dialog_act)
# print('ori sen', ori_sen)
# pattern = re.compile(r'(\[[^\[^\]]+\])')
# slots = pattern.findall(sentences)
# for slot in slots:
# sentences = sentences.replace(slot, ' ')
# print('after replace:', sentences)
# raise Exception
return sentences
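        # Illustrative replacement (assumed act): with the act
        # ['Inform', '景点', '名称', '故宫'], the placeholder
        # '[Inform+景点+名称]' in the delexicalised sentence becomes '故宫'.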
def _prepare_intent_string(self, cur_act):
"""
Generate the intent form **to be used in selecting templates** (rather than value replacement)
:param cur_act: one act list
:return: one intent string
"""
cur_act = deepcopy(cur_act)
if cur_act[0] == 'Inform' and '酒店设施' in cur_act[2]:
cur_act[2] = cur_act[2].split('-')[0] + '+' + cur_act[3]
elif cur_act[0] == 'Request' and '酒店设施' in cur_act[2]:
cur_act[2] = cur_act[2].split('-')[0]
if cur_act[0] == 'Select':
cur_act[2] = '源领域+' + cur_act[3]
try:
if '+'.join(cur_act) == 'Inform+景点+门票+免费':
intent = '+'.join(cur_act)
# "Inform+景点+周边酒店+无"
elif cur_act[3] == '无':
intent = '+'.join(cur_act)
else:
intent = '+'.join(cur_act[:-1])
except Exception as e:
print('Act causing error:')
pprint(cur_act)
raise e
return intent
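        # Illustrative mappings (assumed acts, following the rules above):
        #   ['Inform', '酒店', '酒店设施-无线上网', '是'] -> 'Inform+酒店+酒店设施+是'
        #   ['Select', '餐馆', '源领域', '景点'] -> 'Select+餐馆+源领域+景点'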
def generate(self, meta):
meta = [[str(x[0]), str(x[1]), str(x[2]), str(x[3]).lower()] for x in meta]
meta = deepcopy(meta)
delex = self.generate_delex(meta)
return self._value_replace(delex[0].replace('UNK_token', '').replace(' ', ''), meta)
if __name__ == '__main__':
model_sys = SCLSTM(is_user=True, use_cuda=True)
print(model_sys.generate([['Inform', '餐馆', '人均消费', '100-150元'], ['Request', '餐馆', '电话', '']]))
| [
"[email protected]"
] | |
fc3dceb9403798a3866deac0936754eba2b83220 | c30906c50ea0fbcccbf080b89eca84edb9f04673 | /DaVinci_scripts/MC/twoBody/Kpi/2015_Bd_DKstar.py | 29aa2cce06d3f6b0e0dbe7c4ca0c63a60f462224 | [] | no_license | hpullen/DPhil_B02DKstar_analysis | 543661c4c2e978fb7f60a1d81f27bc660710994d | 651b3f333d3959e78512fc294afa334e3ea26fd9 | refs/heads/master | 2023-07-15T17:38:53.009366 | 2021-08-25T19:40:42 | 2021-08-25T19:40:42 | 107,555,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,162 | py | from GaudiConf import IOHelper
from Configurables import DaVinci
from Configurables import EventTuple
from Configurables import DecayTreeTuple
from Configurables import L0TriggerTisTos, TriggerTisTos
from Configurables import TupleToolPropertime, TupleToolTISTOS
from Configurables import TupleToolStripping, TupleToolDecayTreeFitter
from Configurables import TupleToolGeometry, TupleToolKinematic
from Configurables import TupleToolPrimaries
from Configurables import TupleToolTrackInfo, TupleToolDecay
from Configurables import TupleToolTrackIsolation
from Configurables import LoKi__Hybrid__TupleTool, LoKi__Hybrid__EvtTupleTool
from DecayTreeTuple.Configuration import *
# ========================================
# Stream and stripping line we want to use
# ========================================
stream = 'AllStreams'
line = 'B02D0KPiD2HHBeauty2CharmLine'
# =============================================================
# Create an ntuple to capture B0 decays from the stripping line
# =============================================================
dtt = DecayTreeTuple('Tuple_Kpi')
dtt.Inputs = ['/Phys/{0}/Particles'.format(line)]
dtt.Decay = ('[[B0 -> ^(D0 -> ^K- ^pi+) ^(K*(892)0 -> ^K+ ^pi-)]CC,'
'[B0 -> ^(D0 -> ^K+ ^pi-) ^(K*(892)0 -> ^K+ ^pi-)]CC,'
'[B0 -> ^(D0 -> ^K- ^pi+) ^(K*(892)~0 -> ^K- ^pi+)]CC,'
'[B0 -> ^(D0 -> ^K+ ^pi-) ^(K*(892)~0 -> ^K- ^pi+)]CC]')
# ============
# Add branches
# ============
dtt.addBranches({
"Bd": ('[[B0 -> (D0 -> K- pi+) (K*(892)0 -> K+ pi-)]CC,'
'[B0 -> (D0 -> K+ pi-) (K*(892)0 -> K+ pi-)]CC,'
'[B0 -> (D0 -> K- pi+) (K*(892)~0 -> K- pi+)]CC,'
'[B0 -> (D0 -> K+ pi-) (K*(892)~0 -> K- pi+)]CC]'),
"D0": ('[[B0 -> ^(D0 -> K- pi+) (K*(892)0 -> K+ pi-)]CC,'
'[B0 -> ^(D0 -> K+ pi-) (K*(892)0 -> K+ pi-)]CC,'
'[B0 -> ^(D0 -> K- pi+) (K*(892)~0 -> K- pi+)]CC,'
'[B0 -> ^(D0 -> K+ pi-) (K*(892)~0 -> K- pi+)]CC]'),
"D0K": ('[[B0 -> (D0 -> ^K- pi+) (K*(892)0 -> K+ pi-)]CC,'
'[B0 -> (D0 -> ^K+ pi-) (K*(892)0 -> K+ pi-)]CC,'
'[B0 -> (D0 -> ^K- pi+) (K*(892)~0 -> K- pi+)]CC,'
'[B0 -> (D0 -> ^K+ pi-) (K*(892)~0 -> K- pi+)]CC]'),
"D0Pi": ('[[B0 -> (D0 -> K- ^pi+) (K*(892)0 -> K+ pi-)]CC,'
'[B0 -> (D0 -> K+ ^pi-) (K*(892)0 -> K+ pi-)]CC,'
'[B0 -> (D0 -> K- ^pi+) (K*(892)~0 -> K- pi+)]CC,'
'[B0 -> (D0 -> K+ ^pi-) (K*(892)~0 -> K- pi+)]CC]'),
"Kstar": ('[[B0 -> (D0 -> K- pi+) ^(K*(892)0 -> K+ pi-)]CC,'
'[B0 -> (D0 -> K+ pi-) ^(K*(892)0 -> K+ pi-)]CC,'
'[B0 -> (D0 -> K- pi+) ^(K*(892)~0 -> K- pi+)]CC,'
'[B0 -> (D0 -> K+ pi-) ^(K*(892)~0 -> K- pi+)]CC]'),
"KstarK": ('[[B0 -> (D0 -> K- pi+) (K*(892)0 -> ^K+ pi-)]CC,'
'[B0 -> (D0 -> K+ pi-) (K*(892)0 -> ^K+ pi-)]CC,'
'[B0 -> (D0 -> K- pi+) (K*(892)~0 -> ^K- pi+)]CC,'
'[B0 -> (D0 -> K+ pi-) (K*(892)~0 -> ^K- pi+)]CC]'),
"KstarPi": ('[[B0 -> (D0 -> K- pi+) (K*(892)0 -> K+ ^pi-)]CC,'
'[B0 -> (D0 -> K+ pi-) (K*(892)0 -> K+ ^pi-)]CC,'
'[B0 -> (D0 -> K- pi+) (K*(892)~0 -> K- ^pi+)]CC,'
'[B0 -> (D0 -> K+ pi-) (K*(892)~0 -> K- ^pi+)]CC]')
})
# ===============================
# Group charged daughter branches
# ===============================
Kpi_h = [dtt.D0K, dtt.D0Pi, dtt.KstarK, dtt.KstarPi]
# =====================
# List of trigger lines
# =====================
triggerListL0 = ["L0HadronDecision"]
triggerListHlt1 = ["Hlt1TrackMVADecision",
"Hlt1TwoTrackMVADecision",
"Hlt1TrackPhotonDecision",
"Hlt1TrackForwardPassThroughDecision",
"Hlt1TrackForwardPassThroughLooseDecision"]
triggerListHlt2 = ["Hlt2Topo2BodyDecision",
"Hlt2Topo3BodyDecision",
"Hlt2Topo4BodyDecision"]
triggerListAll = triggerListL0 + triggerListHlt1 + triggerListHlt2
# ==============
# Add TupleTools
# ==============
dtt.ToolList = ['TupleToolAngles',
'TupleToolEventInfo',
'TupleToolGeometry',
'TupleToolKinematic',
'TupleToolPrimaries',
'TupleToolPropertime',
'TupleToolRecoStats',
'TupleToolTrackInfo',
'TupleToolMCTruth',
'TupleToolMCBackgroundInfo']
# TupleToolTISTOS
tttistos = dtt.addTupleTool("TupleToolTISTOS/tttistos")
tttistos.VerboseL0 = True
tttistos.VerboseHlt1 = True
tttistos.VerboseHlt2 = True
tttistos.TriggerList = triggerListAll
# TupleToolTrigger
tttrigger = dtt.addTupleTool("TupleToolTrigger/tttrigger")
tttrigger.Verbose = True
tttrigger.TriggerList = triggerListAll
# TupleToolStripping
ttstripping = dtt.addTupleTool("TupleToolStripping/ttstripping")
ttstripping.StrippingList = [("StrippingB02D0KPiD2HH"
"Beauty2CharmLineDecision")]
# =======================
# DecayTreeFitter
# =======================
dtt.Bd.addTupleTool('TupleToolDecayTreeFitter/ConsD')
dtt.Bd.ConsD.constrainToOriginVertex = True
dtt.Bd.ConsD.Verbose = True
dtt.Bd.ConsD.daughtersToConstrain = ['D0']
dtt.Bd.ConsD.UpdateDaughters = True
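# A note on the refit above (a reading of the configured options): the B
# candidate is refit with the D0 mass constrained and required to point
# back to its origin vertex; UpdateDaughters additionally stores the
# refitted daughter momenta in the tuple.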
# ==============
# LoKi variables
# ==============
# Charged daughter LoKi variables
LoKi_h_vars = {
"Q": "Q",
"MIP_PV": "MIPDV(PRIMARY)",
"MIPCHI2_PV": "MIPCHI2DV(PRIMARY)",
"TRCHI2DOF": "TRCHI2DOF",
"TRGHOSTPROB": "TRGHP",
"TRTYPE": "TRTYPE",
"ETA": "ETA",
"Y": "Y"
}
# K*0 LoKi variables
LoKi_Kstar_vars = {
"Q": "Q",
"DIRA_BPV": "BPVDIRA",
"MAXDOCA": "DOCAMAX",
"AMAXDOCA": "PFUNA(AMAXDOCA(''))",
"AMINDOCA": "PFUNA(AMINDOCA(''))",
"MIP_PV": "MIPDV(PRIMARY)",
"MIPCHI2_PV": "MIPCHI2DV(PRIMARY)",
"IP_BPV": "BPVIP()",
"IPCHI2_BPV": "BPVIPCHI2()",
"FD_BPV": "BPVVD",
"FD_BPV_SIGNED": "BPVVDSIGN",
"FDCHI2_BPV": "BPVVDCHI2",
"RHO_BPV": "BPVVDRHO",
"Z_BPV": "BPVVDZ",
"LOKI_VFASPF_VCHI2VDOF": "VFASPF(VCHI2/VDOF)",
"ENDVTX_X": "VFASPF(VX)",
"ENDVTX_Y": "VFASPF(VY)",
"ENDVTX_Z": "VFASPF(VZ)",
"ETA": "ETA",
"Y": "Y",
"LT_BPV": ("BPVLTIME('PropertimeFitter/ProperTime"
"::PUBLIC')"),
"LTCHI2_BPV": ("BPVLTCHI2('PropertimeFitter/ProperTime"
"::PUBLIC')"),
"LTFITCHI2_BPV": ("BPVLTFITCHI2('PropertimeFitter/ProperTime"
"::PUBLIC')")
}
# D0 LoKi variables
LoKi_D0_vars = LoKi_Kstar_vars
# B LoKi variables
LoKi_Bd_extra_vars = {
"LV01": "LV01",
"LV02": "LV02",
"ptasy_1.50": ("RELINFO('/Event/AllStreams/Phys/"
"B02D0KPiD2HHBeauty2CharmLine/P2ConeVar1',"
"'CONEPTASYM',-1000.)"),
"ptasy_1.70": ("RELINFO('/Event/AllStreams/Phys/"
"B02D0KPiD2HHBeauty2CharmLine/P2ConeVar2',"
"'CONEPTASYM',-1000.)")
}
LoKi_Bd_vars = dict(LoKi_Kstar_vars.items() + LoKi_Bd_extra_vars.items())
# LoKi variables for event
LoKi_evt_vars = {
# Track information
"LOKI_nTracks": "RECSUMMARY(LHCb.RecSummary.nTracks, -1)",
"LOKI_nLong": "RECSUMMARY(LHCb.RecSummary.nLongTracks, -1)",
# RICH multiplicities
"LOKI_nRICH1Hits": "switch(HASRECSUMMARY(20), RECSUMMARY(20, -1), -1)",
"LOKI_nRICH2Hist": "switch(HASRECSUMMARY(21), RECSUMMARY(21, -1), -1)"
}
# ==================
# Add LoKi variables
# ==================
for branch in Kpi_h:
LoKi_h = branch.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_h")
LoKi_h.Preambulo = ["from LoKiTracks.decorators import *"]
LoKi_h.Variables = LoKi_h_vars
LoKi_Bd = dtt.Bd.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_Bd")
LoKi_Bd.Variables = LoKi_Bd_vars
LoKi_Kstar = dtt.Kstar.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_Kstar")
LoKi_Kstar.Variables = LoKi_Kstar_vars
LoKi_D0 = dtt.D0.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_D0")
LoKi_D0.Variables = LoKi_D0_vars
LoKi_evt = dtt.addTupleTool("LoKi::Hybrid::EvtTupleTool/LoKi_evt")
LoKi_evt.Preambulo = ['from LoKiTracks.decorators import *',
'from LoKiNumbers.decorators import *',
'from LoKiCore.functions import *']
LoKi_evt.VOID_Variables = LoKi_evt_vars
# =================
# Configure DaVinci
# =================
DaVinci().UserAlgorithms += [dtt]
DaVinci().RootInTES = '/Event/{0}'.format(stream)
DaVinci().InputType = 'MDST'
DaVinci().TupleFile = 'Tuple_Bd_DKstar.root'
DaVinci().PrintFreq = 1000
DaVinci().DataType = '2015'
DaVinci().Simulation = True
DaVinci().Lumi = False
DaVinci().EvtMax = -1 # Process all events
DaVinci().DDDBtag = 'dddb-20150724'
DaVinci().CondDBtag = 'sim-20161124-vc-md100'
| [
"[email protected]"
] | |
2ba1d13a4758708ce06af4763c6bd9aad52b1632 | 39bcdb8ab7262e9a09556540d677cac162757f74 | /items/models.py | e027feb7741490262fde716b9d8de4661da2957b | [] | no_license | NiiColeman/mywarehouse | c08c1aee3a4d8a5dd17642358a14e2b122d9cb14 | d9b2fae9ab5d164a13b208042d8e3366e3b81b79 | refs/heads/master | 2023-04-06T22:29:52.771226 | 2021-04-16T23:19:10 | 2021-04-16T23:19:10 | 224,866,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,424 | py | from django.db import models
# from django.contrib.auth.models import AbstractUser
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin)
# Create your models here.
from departments.models import Department
from django.shortcuts import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
class UserManager(BaseUserManager):
def create_user(
self, username, email, first_name, last_name, password=None,
commit=True):
"""
Creates and saves a User with the given email, first name, last name
and password.
"""
if not username:
            raise ValueError(_('Users must have a username'))
if not email:
raise ValueError(_('Users must have an email address'))
if not first_name:
raise ValueError(_('Users must have a first name'))
if not last_name:
raise ValueError(_('Users must have a last name'))
user = self.model(
username=username,
email=self.normalize_email(email),
first_name=first_name,
last_name=last_name,
)
user.set_password(password)
if commit:
user.save(using=self._db)
return user
def create_superuser(self, username, email, first_name, last_name, password):
"""
Creates and saves a superuser with the given email, first name,
last name and password.
"""
user = self.create_user(
username=username,
email=email,
password=password,
first_name=first_name,
last_name=last_name,
commit=False,
)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
username = models.CharField(max_length=50, unique=True, null=True)
email = models.EmailField(
verbose_name=_('email address'), max_length=255, unique=True
)
# password field supplied by AbstractBaseUser
# last_login field supplied by AbstractBaseUser
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=150, blank=True)
phone_number = models.CharField(max_length=50, null=True, blank=True)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
),
)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_(
'Designates whether the user can log into this admin site.'
),
)
# is_superuser field provided by PermissionsMixin
# groups field provided by PermissionsMixin
# user_permissions field provided by PermissionsMixin
date_joined = models.DateTimeField(
_('date joined'), default=timezone.now
)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name', 'username']
def get_full_name(self):
"""
Return the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def __str__(self):
return '{} <{}>'.format(self.get_full_name(), self.email)
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
def get_absolute_url(self):
return reverse("user_detail", kwargs={"pk": self.pk})
class Category(models.Model):
name = models.CharField(max_length=250)
# TODO: Define fields here
class Meta:
"""Meta definition for Category."""
verbose_name = 'Category'
verbose_name_plural = 'Categories'
def __str__(self):
"""Unicode representation of Category."""
return self.name
def get_absolute_url(self):
return reverse("items:category_detail", kwargs={"pk": self.pk})
def get_update_url(self):
return reverse("items:update_category", kwargs={"pk": self.pk})
def get_delete_url(self):
return reverse("items:category_delete", kwargs={"pk": self.pk})
class Item(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=250, unique=True)
image = models.ImageField(upload_to='images/', null=True, blank=True)
category = models.ForeignKey(
Category, related_name='categories', on_delete=models.CASCADE)
stock_on_hand = models.IntegerField(default=0)
expiry_date = models.DateTimeField(auto_now=False)
shelf_number = models.CharField(max_length=50)
description = models.CharField(max_length=450)
perishable = models.BooleanField(default=False)
expired = models.BooleanField(default=False)
timestamp = models.DateTimeField(auto_now=True)
shelved = models.BooleanField(default=False)
# TODO: Define fields here
class Meta:
"""Meta definition for Item."""
verbose_name = 'Item'
verbose_name_plural = 'Items'
def __str__(self):
"""Unicode representation of Item."""
return self.name
def get_absolute_url(self):
return reverse("items:item_detail", kwargs={"pk": self.pk})
def get_update_url(self):
return reverse("items:item_update", kwargs={"pk": self.pk})
def get_delete_url(self):
return reverse("items:item_delete", kwargs={"pk": self.pk})
class ItemSetting(models.Model):
CHOICES = (
(30, ("1 Months")),
(60, ("2 Months")),
(90, ("3 Months")),
(120, ("4 Months"))
)
name = models.CharField(default="Item Settings", max_length=50)
low_stock_limit = models.IntegerField(default=10)
item_expiration_limit = models.IntegerField(choices=CHOICES, default=30)
class Meta:
verbose_name = ("Item Setting")
verbose_name_plural = ("Item Settings")
def __str__(self):
return self.name
class ShelfItem(models.Model):
"""Model definition for ShelfItem."""
item = models.ForeignKey(Item, on_delete=models.CASCADE)
quantity = models.IntegerField(default=0)
date_add = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
shelf = models.CharField(max_length=50)
# TODO: Define fields here
class Meta:
"""Meta definition for ShelfItem."""
verbose_name = 'Shelf Item'
verbose_name_plural = 'Shelf Items'
def __str__(self):
"""Unicode representation of ShelfItem."""
return self.item.name
def get_absolute_url(self):
return reverse("items:shelf_detail", kwargs={"pk": self.pk})
def get_update_url(self):
return reverse("items:shelf_update", kwargs={"pk": self.pk})
def get_delete_url(self):
return reverse("items:shelf_delete", kwargs={"pk": self.pk})
# TODO: Define custom methods here
| [
"[email protected]"
] | |
2a193ad76eebcee16956107da08f264bb2ddbdf3 | 78d17c3a7332be85078b513eee02f7ae4f18b3db | /lintcode/unique_binary_search_treesII.py | 5d39703efaeeda53c679c19120045959b595db4a | [] | no_license | yuhanlyu/coding-challenge | c28f6e26acedf41cef85519aea93e554b43c7e8e | 9ff860c38751f5f80dfb177aa0d1f250692c0500 | refs/heads/master | 2021-01-22T21:59:27.278815 | 2017-11-26T07:34:04 | 2017-11-26T07:34:04 | 85,498,747 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    # @param n: An integer
    # @return: A list of roots
def generateTrees(self, n):
if n == 0: return [None]
DP = [[[None] for _ in xrange(n + 2) ] for _ in xrange(n + 2)]
for k in xrange(n):
for i in xrange(1, n - k + 1):
DP[i][i + k] = []
for root in xrange(i, i + k + 1):
for left in DP[i][root - 1]:
for right in DP[root + 1][i + k]:
node = TreeNode(root)
node.left, node.right = left, right
DP[i][i + k].append(node)
return DP[1][n]
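        # Sketch of the recurrence (a reading of the code above, stated as
        # comments for clarity): DP[i][j] holds every BST built from the
        # values i..j; a tree rooted at r in [i, i+k] pairs each left
        # subtree from DP[i][r-1] with each right subtree from DP[r+1][i+k],
        # so DP[1][n] collects all trees over 1..n.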
| [
"[email protected]"
] | |
1139ed09672ed55f980751bb9577805830e7ef9e | a9ef3be91fe746b44b8a4e2fcbd92b79ddc50305 | /04day/4-文件备份.py | b9e012de78ed01c6cdd0a87069d210d7ad85e780 | [] | no_license | ittoyou/2-1807 | a7718791bbc4095b6ef07003e6a2ef0b07fcd6de | 82bf09f57ccb86b88abfd4a60bca51c5cf757065 | refs/heads/master | 2020-03-25T10:29:53.324428 | 2018-09-06T01:14:43 | 2018-09-06T01:14:43 | 143,694,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | class Tool():
def beifen(self):
name = input('请输入要备份的文件名字(加上后缀名)')
f = open(name,'r')
position = name.rfind('.')
newname = name[:position]+'备份'+name[position:]
f1 = open(newname,'w')
while True:
content = f.read(1024)
if len(content) == 0:
break
f1.write(content)
f.close()
f1.close()
t = Tool()
t.beifen()
| [
"[email protected]"
] |