code | docs
---|---
def initialize_worker(self, process_num=None):
for p in self.producers:
p.initialize_worker(process_num)
# self.initial_state.process = process_num
self.random.seed(hash(self.seed) + hash(process_num)) | inits producer for a simulation run on a single process |
def initialize_path(self, path_num=None):
for p in self.producers:
p.initialize_path(path_num)
# self.state = copy(self.initial_state)
# self.state.path = path_num
self.random.seed(hash(self.seed) + hash(path_num)) | inits producer for next path, i.e. sets current state to initial state |
def evolve(self, new_date):
self.state = [p.evolve(new_date) for p in self.producers]
return self.state | evolve to the new process state at the next date, i.e. do one step in the simulation
:param date new_date: date of the new state
:return State: |
def where_unique(cls, ip, object_id, location):
return cls.query.filter_by(
ip=ip,
object_id=object_id,
location=location).first() | Get db model by ip, object_id and location |
def delete_where_unique(cls, ip, object_id, location):
result = cls.where_unique(ip, object_id, location)
if result is None:
return None
result.delete()
return True | delete by ip, object id and location |
def do_req(self, method, url, body=None, headers=None, status=None):
if body is None:
body = ''
else:
body = json.dumps(body)
res = self.backend.dispatch_request(method=method,
url=url,
body=body,
headers=self.get_headers(headers),
auth=self.auth)
if not isinstance(res, MapiResponse):
res = MapiResponse(*res)
if status is None:
if res.status // 100 != 2:
raise MapiError(*res)
elif res.status != status:
raise MapiError(*res)
return res | Used internally to send a request to the API, left public
so it can be used to talk to the API more directly. |
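A minimal usage sketch of calling do_req directly; the client instance, endpoint and payload below are hypothetical and not part of the original module. Passing status pins the expected status code, otherwise any non-2xx response raises MapiError:
res = client.do_req('POST', client.merchant_api_base_url + '/pos/',
                    body={'name': 'Kiosk 1', 'type': 'store', 'id': 'pos-1'},
                    status=201)
data = res.json()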
def _depaginate_all(self, url):
items = []
for x in self._depagination_generator(url):
items += x
return items | GETs the url provided and traverses the 'next' url that's
returned while storing the data in a list. Returns a single list of all
items. |
def create_user(self, user_id,
roles=None, netmask=None,
secret=None, pubkey=None):
arguments = {'id': user_id,
'roles': roles,
'netmask': netmask,
'secret': secret,
'pubkey': pubkey}
return self.do_req('POST', self.merchant_api_base_url + '/user/', arguments).json() | Create user for the Merchant given in the X-Mcash-Merchant header.
Arguments:
user_id:
Identifier for the user
roles:
Role
netmask:
Limit user connections by netmask, for example 192.168.1.0/24
secret:
Secret used when authenticating with mCASH
pubkey:
RSA key used for authenticating by signing |
def update_user(self, user_id,
roles=None, netmask=None,
secret=None, pubkey=None):
arguments = {'roles': roles,
'netmask': netmask,
'secret': secret,
'pubkey': pubkey}
return self.do_req('PUT',
self.merchant_api_base_url + '/user/' +
user_id + '/', arguments) | Update user. Returns the raw response object.
Arguments:
user_id:
User id of user to update
roles:
Role
netmask:
Limit user connections by netmask, for example 192.168.1.0/24
secret:
Secret used when authenticating with mCASH
pubkey:
RSA key used for authenticating by signing |
def create_pos(self, name, pos_type,
pos_id, location=None):
arguments = {'name': name,
'type': pos_type,
'id': pos_id,
'location': location}
return self.do_req('POST', self.merchant_api_base_url + '/pos/', arguments).json() | Create POS resource
Arguments:
name:
Human-readable name of the POS, used for displaying payment
request origin to end user
pos_type:
POS type
location:
Merchant location
pos_id:
The ID of the POS that is to be created. Has to be unique for
the merchant |
def update_pos(self, pos_id, name, pos_type, location=None):
arguments = {'name': name,
'type': pos_type,
'location': location}
return self.do_req('PUT',
self.merchant_api_base_url + '/pos/' +
pos_id + '/', arguments) | Update POS resource. Returns the raw response object.
Arguments:
pos_id:
POS id as chosen on registration
name:
Human-readable name of the POS, used for displaying payment
request origin to end user
pos_type:
POS type
location:
Merchant location |
def post_chat_message(self, merchant_id, channel_id, message):
return self.do_req('POST',
self.base_url + '/chat/v1/merchant/%s/channel/%s/message/' % (merchant_id, channel_id),
message) | post a chat message
Arguments:
channel_id:
Scan token |
def update_ticket(self, tid, tickets=None):
arguments = {'tickets': tickets}
return self.do_req('PUT',
self.merchant_api_base_url + '/payment_request/' +
tid + '/ticket/', arguments) | If the customer should be granted an electronic ticket as a result
of a successful payment, the merchant may (at any time) PUT ticket
information to this endpoint. There is an ordered list of tickets; the
merchant may PUT several times to update the list. The PUT overwrites
any existing content, so if adding additional tickets one must remember
to also include the tickets previously issued.
So far the only code type supported is "string", meaning a text code
that is displayed to the customer, however we will add QR code,
barcodes etc. soon. Please contact mCASH about supporting your
barcode.
Arguments:
tickets:
List of tickets to grant customer |
def create_shortlink(self, callback_uri=None,
description=None, serial_number=None):
arguments = {'callback_uri': callback_uri,
'description': description,
'serial_number': serial_number}
return self.do_req('POST', self.merchant_api_base_url + '/shortlink/',
arguments).json() | Register new shortlink
Arguments:
callback_uri:
URI called by mCASH when user scans shortlink
description:
Shortlink description displayed in confirmation dialogs
serial_number:
Serial number on printed QR codes. This field is only used when
registering printed stickers issued by mCASH |
def update_shortlink(self, shortlink_id, callback_uri=None,
description=None):
arguments = {'callback_uri': callback_uri,
'description': description}
return self.do_req('PUT',
self.merchant_api_base_url + '/shortlink/' +
shortlink_id + '/', arguments) | Update existing shortlink registration
Arguments:
shortlink_id:
Shortlink id assigned by mCASH |
def get_shortlink(self, shortlink_id_or_url):
if "://" not in shortlink_id_or_url:
shortlink_id_or_url = self.merchant_api_base_url + '/shortlink/' + shortlink_id_or_url + '/'
return self.do_req('GET', shortlink_id_or_url).json() | Retrieve registered shortlink info
Arguments:
shortlink_id_or_url:
Shortlink id or url, assigned by mCASH |
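Illustrative calls (the shortlink id and url below are made up): a bare id is expanded to the merchant shortlink endpoint, while a full url is used as-is:
info = client.get_shortlink('abc123')  # expanded to .../shortlink/abc123/
info = client.get_shortlink('https://api.example.net/merchant/v1/shortlink/abc123/')  # used verbatim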
def create_ledger(self, currency, description=None):
arguments = {'currency': currency,
'description': description}
return self.do_req('POST',
self.merchant_api_base_url + '/ledger/', arguments).json() | Create a ledger |
def update_ledger(self, ledger_id, description=None):
arguments = {'description': description}
return self.do_req('PUT',
self.merchant_api_base_url + '/ledger/' +
ledger_id + '/', arguments) | Update ledger info
Arguments:
ledger_id:
Ledger id assigned by mCASH
description:
Description of the Ledger and its usage |
def close_report(self, ledger_id, report_id, callback_uri=None):
arguments = {'callback_uri': callback_uri}
return self.do_req('PUT',
self.merchant_api_base_url + '/ledger/' +
ledger_id + '/report/' +
report_id + '/', arguments) | Close Report
When you PUT to a report, it will start the process of closing it. When
the closing process is complete (i.e. when report.status == 'closed')
mCASH does a POST call to callback_uri, if provided. This call will
contain JSON data similar to when GETing the Report.
Closing a report automatically opens a new one.
The contents of a GET
/merchant/v1/ledger/<ledger_id>/report/<report_id>/ is included in
callback if callback is a secure URI, otherwise the link itself is sent
in callback.
Arguments:
ledger_id:
Id for ledger for report
report_id:
Report id assigned by mCASH
callback_uri:
Callback URI to be called when Report has finished closing. |
def get_report(self, ledger_id, report_id):
return self.do_req('GET',
self.merchant_api_base_url + '/ledger/' +
ledger_id + '/report/' +
report_id + '/').json() | Get report info
Arguments:
ledger_id:
Id for ledger for report
report_id:
Report id assigned by mCASH |
def create_permission_request(self, customer, pos_id, pos_tid, scope,
ledger=None, text=None, callback_uri=None,
expires_in=None):
arguments = {'customer': customer,
'pos_id': pos_id,
'pos_tid': pos_tid,
'scope': scope,
'ledger': ledger,
'text': text,
'callback_uri': callback_uri,
'expires_in': expires_in}
return self.do_req('POST',
self.merchant_api_base_url + '/permission_request/',
arguments).json() | Create permission request
The call is idempotent; that is, if one posts the same pos_id and
pos_tid twice, only one Permission request is created. |
def upload_receipt(self, url, data):
return self.upload_attachment(url=url, data=data, mime_type='application/vnd.mcash.receipt.v1+json') | Upload a receipt to the given url
:param url:
:param data:
:return: |
def handle_twitter_http_error(e, error_count, call_counter, time_window_start, wait_period):
if e.error_code == 401:
# Encountered 401 Error (Not Authorized)
raise e
elif e.error_code == 404:
# Encountered 404 Error (Not Found)
raise e
elif e.error_code == 429:
# Encountered 429 Error (Rate Limit Exceeded)
# Sleep for 15 minutes
error_count += 0.5
call_counter = 0
wait_period = 2
time.sleep(60*15 + 5)
time_window_start = time.perf_counter()
return error_count, call_counter, time_window_start, wait_period
elif e.error_code in (500, 502, 503, 504):
error_count += 1
time.sleep(wait_period)
wait_period *= 1.5
return error_count, call_counter, time_window_start, wait_period
else:
raise e | This function handles the twitter request in case of an HTTP error.
Inputs: - e: A twython.TwythonError instance to be handled.
- error_count: Number of failed retries of the call until now.
- call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
- time_window_start: The timestamp of the current 15-minute window.
- wait_period: For certain Twitter errors (i.e. server overload), we wait and call again.
Outputs: - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
- time_window_start: The timestamp of the current 15-minute window.
- wait_period: For certain Twitter errors (i.e. server overload), we wait and call again.
Raises: - twython.TwythonError |
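A hypothetical retry loop showing how the handler is meant to be threaded through repeated Twython calls; the twitter client and the specific API call are placeholders, not part of the original module:
import time
import twython

error_count, call_counter, wait_period = 0, 0, 2
time_window_start = time.perf_counter()
while error_count < 5:
    try:
        followers = twitter.get_followers_ids(screen_name='example')
        call_counter += 1
        break
    except twython.TwythonError as e:
        # delegate rate-limit and server-error handling; other errors re-raise
        error_count, call_counter, time_window_start, wait_period = \
            handle_twitter_http_error(e, error_count, call_counter,
                                      time_window_start, wait_period)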
def make_bundle(bundle, fixed_version=None):
tmp_output_file_name = '%s.%s.%s' % (os.path.join(bundle.bundle_file_root, bundle.bundle_filename), 'temp', bundle.bundle_type)
iter_input = iter_bundle_files(bundle)
output_pipeline = processor_pipeline(bundle.processors, iter_input)
m = md5()
with open(tmp_output_file_name, 'wb') as output_file:
for chunk in output_pipeline:
m.update(chunk)
output_file.write(chunk)
hash_version = fixed_version or m.hexdigest()
output_file_name = bundle.get_path(hash_version)
os.rename(tmp_output_file_name, output_file_name)
return hash_version | Does all of the processing required to create a bundle and write it to disk, returning its hash version |
def get_db_distinct(queryset, field, func, **params):
while True:
try:
value = func(**params)
queryset.get(**{field: value})
except ObjectDoesNotExist:
break
return value | Checks if a field / value pair exists in database
and continues generating values until it finds
one that does not exist.
func is the function that generates values and
params are the parameters that function takes |
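A hedged usage sketch, pairing it with a generator such as random_string (defined elsewhere in this collection); the Article model and slug field are hypothetical:
# keep drawing random slugs until one is not already taken in the queryset
slug = get_db_distinct(Article.objects.all(), 'slug', random_string, length=8)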
def get_model_class(name):
LOGGER.warning('Beware, function returns first match in the model registry.')
# iterate all registered models
for model in apps.get_models():
# return the app_label for first match
if name == model._meta.object_name:
app_label = model._meta.app_label
return apps.get_model(app_label, name) | This is being implemented to help
with the Email Module, where we
want to use a model for the email
context without needing to import
the model (which in most cases creates
a circular dependency, anyway)
Beware that the current implementation
returns the first match, so if a model
with the same name exists in two different
applications this will not work
http://stackoverflow.com/a/13242421 |
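Illustrative only ('Invoice' stands in for any registered model name); this is the circular-import workaround the docstring describes:
Invoice = get_model_class('Invoice')
invoice = Invoice.objects.get(pk=42)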
def html_to_text(html_string):
# create a valid html document from string
# beware that it inserts <html> <body> and <p> tags
# where needed
html_tree = html.document_fromstring(html_string)
# handle header tags
for h in html_tree.cssselect("h1, h2, h3, h4, h5, h6"):
# add two newlines after a header tag
h.text = h.text + '\n\n'
# handle links
# find all a tags starting from the root of the document //
# and replace the link with (link)
for a in html_tree.xpath("//a"):
href = a.attrib['href']
a.text = a.text + " (" + href + ")"
# handle paragraphs
for p in html_tree.xpath("//p"):
# keep the tail if there is one
# or add two newlines after the text if there is no tail
p.tail = p.tail if p.tail else "\n\n"
# handle breaks
for br in html_tree.xpath("//br"):
# add a newline and then the tail (remaining text after the <br/> tag)
# or add a newline only if there is no tail
# http://stackoverflow.com/questions/18660382/how-can-i-preserve-br-as-newlines-with-lxml-html-text-content-or-equivalent?rq=1
br.tail = "\n" + br.tail if br.tail else "\n"
return html_tree.text_content() | returns a plain text string when given an html string;
handles a, p, h1 to h6 and br tags, inserting newline chars to
create space in the string
@todo handle images |
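A rough sketch of the expected behaviour (exact whitespace may differ slightly):
text = html_to_text('<h1>Title</h1><p>See <a href="http://example.com">docs</a>.</p>')
# roughly: 'Title\n\nSee docs (http://example.com).\n\n'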
def random_string(**kwargs):
n = kwargs.get('length', 10)
pool = kwargs.get('pool') or string.digits + string.ascii_lowercase
return ''.join(random.SystemRandom().choice(pool) for _ in range(n)) | By default generates a random string of 10 chars composed
of digits and ascii lowercase letters. String length and pool can
be overridden via kwargs. Pool must be a string or sequence of characters to draw from |
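Usage sketch (the outputs shown are illustrative, not actual values):
token = random_string()                            # e.g. 'a3k9x0q2bd'
pin = random_string(length=4, pool=string.digits)  # e.g. '4817'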
def initialize(self, grid, num_of_paths, seed):
self.grid = grid
self.num_of_paths = num_of_paths
self.seed = seed
if self.initial_state.date is None:
self.initial_state.date = grid[0] | inits producer for a simulation run |
def initialize_worker(self, process_num=None):
self.initial_state.process = process_num
self.random.seed(hash(self.seed) + hash(process_num)) | inits producer for a simulation run on a single process |
def initialize_path(self, path_num=None):
self.state = copy(self.initial_state)
self.state.path = path_num
self.random.seed(hash(self.seed) + hash(path_num)) | inits producer for next path, i.e. sets current state to initial state |
def evolve(self, new_date):
if self.state.date == new_date and not self.initial_state.date == new_date:
return self.state
self.state.value = self.func(self.state, new_date)
self.state.date = new_date
return self.state | evolve to the new process state at the next date, i.e. do one step in the simulation
:param date new_date: date of the new state
:return State: |
def _run_parallel_process_with_profiling(self, start_path, stop_path, queue, filename):
runctx('Engine._run_parallel_process(self, start_path, stop_path, queue)', globals(), locals(), filename) | wrapper around _run_parallel_process that runs it under the profiler |
def _run_parallel_process(self, start_path, stop_path, queue):
process_num = int(current_process().name.split('-', 2)[1])
self._run_process(start_path, stop_path, process_num)
queue.put(self.consumer.put()) | The function calls _run_process and puts the results produced by the
top-most consumer into the queue |
def _run_process(self, start_path, stop_path, process_num=0):
# pre processing
self.producer.initialize_worker(process_num)
self.consumer.initialize_worker(process_num)
# processing
for path in range(start_path, stop_path):
self._run_path(path)
# post processing
self.consumer.finalize_worker(process_num) | The function calls _run_path for the given set of paths |
def _run_path(self, path_num):
# pre processing
self.producer.initialize_path(path_num)
self.consumer.initialize_path(path_num)
# processing
for new_date in self.grid:
state = self.producer.evolve(new_date)
self.consumer.consume(state)
# post processing
self.consumer.finalize_path(path_num) | standalone function implementing a single Monte Carlo path loop.
It returns the list produced by the consumer at observation dates
:param int path_num: path number |
def initialize(self, grid=None, num_of_paths=None, seed=None):
self.num_of_paths = num_of_paths
self.grid = grid
self.seed = seed
self.result = list()
self.state = self.initial_state | initialize consumer for simulation
:param num_of_paths: number of paths
:type num_of_paths: int
:param grid: list of grid points
:type grid: list(date)
:param seed: simulation seed
:type seed: hashable |
def initialize_worker(self, process_num=None):
self.initialize(self.grid, self.num_of_paths, self.seed) | reinitialize consumer for process in multiprocessing |
def initialize_path(self, path_num=None):
self.state = copy(self.initial_state)
return self.state | initialize consumer for next path |
def consume(self, state):
self.state.append(self.func(state))
return self.state | consume new producer state |
def finalize(self):
# todo sort self.result by path_num
if self.result:
self.result = sorted(self.result, key=lambda x: x[0])
p, r = map(list, zip(*self.result))
self.result = r | finalize simulation for consumer |
def get(self, queue_get):
if isinstance(queue_get, (tuple, list)):
self.result.extend(queue_get) | to get states from multiprocessing.queue |
def register_app_activity():
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from .models import Activity
# TO-DO: Add check for existence of setting
if not hasattr(settings, 'ACTIVITY_MONITOR_MODELS'):
return
for item in settings.ACTIVITY_MONITOR_MODELS:
try:
app_label, model = item['model'].split('.', 1)
content_type = ContentType.objects.get(app_label=app_label, model=model)
model = content_type.model_class()
Activity.objects.follow_model(model)
except ContentType.DoesNotExist:
pass | Create watchers for models defined in settings.py.
Once created, they will be passed to
Activity.objects.follow_model(), which lives in managers.py |
def clear_path(path):
from time import time
if not os.path.exists(path):
return
if TRASH_PATH == '.':
shutil.rmtree(path, ignore_errors=True)
else:
shutil.move(path, '%s/%s_%s' % (
TRASH_PATH, os.path.basename(path), time())) | This will move a path to the Trash folder (or delete it outright when TRASH_PATH is '.')
:param path: str of the path to remove
:return: None |
def _md5_of_file(sub_string):
md5 = hashlib.md5()
file_path = sub_string
if not os.path.exists(file_path):
file_path = os.path.join(os.environ['CAFE_DATA_DIR_PATH'], file_path)
if not os.path.exists(file_path):
file_path = file_path.replace(' ', '_')
assert (os.path.exists(file_path)), "File %s doesn't exist" % file_path
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
return md5.hexdigest() | This will return the md5 of the file in sub_string
:param sub_string: str of the path or relative path to a file
:return: str |
def read_local_file(filename):
frm = inspect.currentframe().f_back
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
path = os.path.split(frm.f_code.co_filename)[0]
return read_file(os.path.join(path, filename)) | This will read a file in the same directory as the calling function
:param filename: str of the basename of the file
:return: str of the content of the file |
def relative_path(sub_directory='', function_index=1):
frm = inspect.currentframe()
for i in range(function_index):
frm = frm.f_back
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
if not isinstance(sub_directory, list):
sub_directory = sub_directory.replace('\\','/').split('/')
path = os.path.split(frm.f_code.co_filename)[0]
if sub_directory:
path = os.path.abspath(os.path.join(path, *sub_directory))
return path | This will return a path relative to the calling python script
:param sub_directory: str of the relative path
:param function_index: int of the number of function calls to go back
:return: str of the full path |
def copy_file(source_file, destination_file, soft_link=False):
if not os.path.exists(source_file):
raise IOError("No such file: %s" % source_file)
mkdir_for_file(destination_file)
if os.path.exists(destination_file):
os.remove(destination_file)
if os.name == 'posix' and soft_link:
try:
os.symlink(source_file, destination_file)
except:
shutil.copy(source_file, destination_file)
else:
try:
shutil.copy(source_file, destination_file)
except Exception:
raise | :param source_file: str of the full path to the source file
:param destination_file: str of the full path to the destination file
:param soft_link: bool if True will soft link if possible
:return: None |
def read_folder(folder, ext='*', uppercase=False, replace_dot='.', parent=''):
ret = {}
if os.path.exists(folder):
for file in os.listdir(folder):
if os.path.isdir(os.path.join(folder, file)):
child = read_folder(os.path.join(folder, file),
ext, uppercase, replace_dot,
parent=parent + file + '/')
ret.update(child)
else:
if ext == '*' or file.endswith(ext):
key = file.replace('.', replace_dot)
key = uppercase and key.upper() or key
ret[parent + key] = read_file(os.path.join(folder, file))
return ret | This will read all of the files in the folder with the extension equal
to ext
:param folder: str of the folder name
:param ext: str of the extension
:param uppercase: bool if True will uppercase all the file names
:param replace_dot: str will replace "." in the filename
:param parent: str of the parent folder
:return: dict of basename with the value of the text in the file |
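A hypothetical layout to show the returned shape: given templates/welcome.txt and templates/mail/footer.txt, the call below would return the keys 'welcome.txt' and 'mail/footer.txt' mapped to each file's text:
files = read_folder('templates', ext='.txt')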
def find_path(target, from_path=None, direction='both', depth_first=False):
from_path = from_path if from_path else relative_path('', 2)
if direction == 'up' or direction == 'both':
path = from_path
for i in range(100):
try:
file_path = os.path.abspath(os.path.join(path, target))
if os.path.exists(file_path):
return file_path
path = os.path.split(path)[0]
if len(path) <= 1:
break
except Exception:
break
if os.path.exists(os.path.join(path, target)):
return os.path.join(path, target)
if direction == 'down' or direction == 'both':
check = ['']
while len(check) != 0:
dir = check.pop(0)
try:
roster = os.listdir(os.path.join(from_path, dir))
except Exception:
continue # ignore directories that are inaccessible
if target in roster:
return os.path.join(from_path, dir, target)
else:
stack = [os.path.join(from_path, dir, i)
for i in roster if '.' not in i]
if depth_first:
check = stack + check
else:
check += stack
raise FileNotFoundError("Failed to find file: %s from %s" % (target, from_path)) | Finds a file or subdirectory from the given
path, defaulting to a breadth-first search.
:param target: str of file or subdirectory to be found
:param from_path: str of path from which to search (defaults to relative)
:param direction: str enum of up, down, both
:param depth_first: bool of changes search to depth-first
:return: str of path to desired file or subdirectory |
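Illustrative call (the filename is a placeholder): search upward from the calling script first, then breadth-first below it, which is what direction='both' does by default:
cfg_path = find_path('config.yaml', direction='both')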
def walk_revctrl(dirname='', ff=''):
file_finder = None
items = []
if not ff:
distutils.log.error('No file-finder passed to walk_revctrl')
sys.exit(1)
for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
if ff == ep.name:
distutils.log.info('using %s file-finder', ep.name)
file_finder = ep.load()
finder_items = []
with pythonpath_off():
for item in file_finder(dirname):
if not basename(item).startswith(('.svn', '.hg', '.git')):
finder_items.append(item)
distutils.log.info('%d files found', len(finder_items))
items.extend(finder_items)
if file_finder is None:
distutils.log.error('Failed to load %s file-finder; setuptools-%s extension missing?',
ff, 'subversion' if ff == 'svn' else ff)
sys.exit(1)
# Returning a non-empty list prevents egg_info from reading the
# existing SOURCES.txt
return items or [''] | Return files found by the file-finder 'ff'. |
def cleanup_pycache():
try:
for file in glob.glob('setup.py[co]'):
os.remove(file)
if isdir('__pycache__'):
for file in glob.glob(join('__pycache__', 'setup.*.py[co]')):
os.remove(file)
if not glob.glob(join('__pycache__', '*')):
os.rmdir('__pycache__')
except (IOError, OSError):
pass | Remove .pyc files we leave around because of import. |
def run(args, ff=''):
import setuptools.command.egg_info
if ff == 'none':
setuptools.command.egg_info.walk_revctrl = no_walk_revctrl
else:
setuptools.command.egg_info.walk_revctrl = partial(walk_revctrl, ff=ff)
sys.argv = ['setup.py'] + args
import setup
cleanup_pycache() | Run setup.py with monkey patches applied. |
def setup_logging(config_path=None, log_level=logging.INFO,
formatter='standard'):
config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format':
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
},
'ui': {
'format':
'[%(levelname)s] %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': log_level,
'formatter': formatter,
'stream': 'ext://sys.stdout'
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': log_level,
'propagate': True
},
'yapsy': {
'handlers': ['console'],
'level': logging.INFO
}
}
}
if config_path:
if os.path.exists(config_path):
with open(config_path, 'rt') as f:
config = yaml.safe_load(f.read())
else:
print('Specified path does not exist: {}, '
'using default config'.format(config_path))
logging.config.dictConfig(config) | Setup logging configuration |
def _get_sorted_iterator(self, iterator):
lines = list(next(iterator))
if len(lines) < self.max_lines:
return iter(sorted(lines, key=self.key))
import tempfile
tmp_dir = tempfile.mkdtemp()
fnames = self._split(chain([lines], iterator), tmp_dir)
return SortedIteratorMerger([unpickle_iter(open(fname, 'rb')) for fname in fnames], self.key) | Get the iterator over the sorted items.
This function decides whether the items can be sorted in memory or on disk.
:return: |
def _split(self, iterator, tmp_dir):
fnames = []
for i, lines in enumerate(iterator):
lines = list(lines)
out_fname = os.path.join(tmp_dir, self.TMP_FNAME.format(i + 1))
self._write(lines, out_fname)
fnames.append(out_fname)
if len(lines) < self.max_lines:
break
return fnames | Splits the file into several chunks.
If the original file is too big to fit in the allocated space, the sorting will be split into several chunks,
then merged.
:param tmp_dir: Where to put the intermediate sorted results.
:param iterator: An iterator over the chunks of lines read from the original file.
:return: The names of the intermediate files. |
def _write(self, lines, fname):
with open(fname, 'wb') as out_fhndl:
for line in sorted(lines, key=self.key):
pickle.dump(line, out_fhndl) | Writes an intermediate temporary sorted file
:param lines: The lines to write.
:param fname: The name of the temporary file.
:return: |
def get_iso_time(date_part, time_part):
str_date = datetime.datetime.strptime(
date_part, '%m/%d/%Y').strftime('%Y-%m-%d')
str_time = datetime.datetime.strptime(
time_part, '%I:%M %p').strftime('%H:%M:%S')
return str_date + "T" + str_time + "-7:00" | Combine date and time into an ISO datetime. |
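Worked example derived from the format strings above (note the fixed '-7:00' offset the code appends):
get_iso_time('12/25/2018', '1:30 PM')   # -> '2018-12-25T13:30:00-7:00'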
def validate(self):
try:
with requests.Session() as session:
result = session.get('https://www.srpnet.com/')
result = session.post(
'https://myaccount.srpnet.com/sso/login/loginuser',
data={'UserName': self.username, 'Password': self.password}
)
result_string = result.content.decode("utf-8")
soup = BeautifulSoup(result_string, "html.parser")
account_select = soup.find(
'select', attrs={'name': 'accountNumber'}
)
accounts = []
for option in account_select.find_all('option'):
if option['value'] != 'newAccount':
accounts.append(option['value'])
valid = len(accounts) > 0
return valid
except Exception: # pylint: disable=W0703
return False | Validate user credentials.
Returns
-------
bool
Examples
--------
Validate credentials.
>>> from srpenergy.client import SrpEnergyClient
>>>
>>> accountid = 'your account id'
>>> username = 'your username'
>>> password = 'your password'
>>> client = SrpEnergyClient(accountid, username, password)
>>>
>>> valid = client.validate()
>>> print(valid)
True |
def get_user_list(host_name, client_name, client_pass):
# Construct request.
request = construct_request(model_type="pers",
client_name=client_name,
client_pass=client_pass,
command="getusrs",
values="whr=*")
# Make request.
request_result = send_request(host_name, request)
# Extract a python list from xml object.
user_id_list = list()
append_user_id = user_id_list.append
if request_result is not None:
user_list_xml = request_result.text
tree = etree.parse(StringIO(user_list_xml))
root = tree.getroot()
xml_rows = root.findall("./result/row/usr")
for xml_row in xml_rows:
append_user_id(xml_row.text)
return user_id_list | Pulls the list of users in a client.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
Output: - user_id_list: A python list of user ids. |
def add_features(host_name, client_name, client_pass, feature_names):
init_feats = ("&".join(["%s=0"]*len(feature_names))) % tuple(feature_names)
features_req = construct_request("pers",
client_name,
client_pass,
"addftr",
init_feats)
send_request(host_name,
features_req) | Add a number of numerical features in the client.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
- feature_names: A python list of feature names. |
def delete_features(host_name, client_name, client_pass, feature_names=None):
# Get all features.
if feature_names is None:
feature_names = get_feature_names(host_name,
client_name,
client_pass)
# Remove all features.
feature_to_be_removed = ("&".join(["ftr=%s"]*len(feature_names))) % tuple(feature_names)
features_req = construct_request("pers",
client_name,
client_pass,
'remftr',
feature_to_be_removed)
send_request(host_name,
features_req) | Remove a number of numerical features in the client. If a list is not provided, remove all features.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
- feature_names: A python list of feature names. |
def get_feature_names(host_name, client_name, client_pass):
# Construct request.
request = construct_request(model_type="pers",
client_name=client_name,
client_pass=client_pass,
command="getftrdef",
values="ftr=*")
# Send request.
request_result = send_request(host_name,
request)
# Extract a python list from xml object.
feature_names = list()
append_feature_name = feature_names.append
if request_result is not None:
feature_names_xml = request_result.text
tree = etree.parse(StringIO(feature_names_xml))
root = tree.getroot()
xml_rows = root.findall("row/ftr")
for xml_row in xml_rows:
append_feature_name(xml_row.text)
return feature_names | Get the names of all features in a PServer client.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
Output: - feature_names: A python list of feature names. |
def insert_user_data(host_name, client_name, client_pass, user_twitter_id, topic_to_score):
# Construct values.
values = "usr=" + str(user_twitter_id)
for topic, score in topic_to_score.items():
values += "&type." + topic + "=%.2f" % score
# Construct request.
request = construct_request(model_type="pers",
client_name=client_name,
client_pass=client_pass,
command="setusr",
values=values)
# Send request.
send_request(host_name,
request) | Inserts topic/score data for a user to a PServer client.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
- user_twitter_id: A Twitter user identifier.
- topic_to_score: A python dictionary that maps from topic to score. |
def construct_request(model_type, client_name, client_pass, command, values):
base_request = ("{model_type}?"
"clnt={client_name}|{client_pass}&"
"com={command}&{values}".format(model_type=model_type,
client_name=client_name,
client_pass=client_pass,
command=command,
values=values))
return base_request | Construct the request url.
Inputs: - model_type: PServer usage mode type.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
- command: A PServer command.
- values: PServer command arguments.
Output: - base_request: The base request string. |
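Worked example of the string this builds, using the same arguments get_user_list passes (client name and password are placeholders):
construct_request('pers', 'myclient', 'mypass', 'getusrs', 'whr=*')
# -> 'pers?clnt=myclient|mypass&com=getusrs&whr=*'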
def send_request(host_name, request):
request = "%s%s" % (host_name, request)
# print(request)
try:
result = requests.get(request)
if result.status_code == 200:
return result
else:
# print(result.status_code)
raise Exception
except Exception as e:
# print(e)
raise e | Sends a PServer url request.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- request: The url request. |
def update_feature_value(host_name, client_name, client_pass, user_twitter_id, feature_name, feature_score):
username = str(user_twitter_id)
feature_value = "{0:.2f}".format(feature_score)
joined_ftr_value = "ftr_" + feature_name + "=" + str(feature_value)
values = "usr=%s&%s" % (username, joined_ftr_value)
# Construct request.
request = construct_request(model_type="pers",
client_name=client_name,
client_pass=client_pass,
command="setusr",
values=values)
# Send request.
send_request(host_name,
request) | Updates a single topic score, for a single user.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
- user_twitter_id: A Twitter user identifier.
- feature_name: A specific PServer feature name.
- feature_score: The corresponding score. |
def init_app(self, app):
self.app = app
self.log = app.logger.getChild('compass')
self.log.debug("Initializing compass integration")
self.compass_path = self.app.config.get('COMPASS_PATH', 'compass')
self.config_files = self.app.config.get('COMPASS_CONFIGS', None)
self.requestcheck_debug_only = self.app.config.get(
'COMPASS_REQUESTCHECK_DEBUG_ONLY', True)
self.skip_mtime_check = self.app.config.get(
'COMPASS_SKIP_MTIME_CHECK', False)
self.debug_only = self.app.config.get(
'COMPASS_DEBUG_ONLY', False)
self.disabled = self.app.config.get('COMPASS_DISABLED', False)
if not self.debug_only:
self.compile()
if (not self.debug_only) \
and (not self.requestcheck_debug_only or self.app.debug):
self.app.after_request(self.after_request) | Initialize the extension once the application configuration has been
loaded. |
def compile(self):
if self.disabled:
return
self._check_configs()
for _, cfg in self.configs.items():
cfg.parse()
if cfg.changes_found() or self.skip_mtime_check:
self.log.debug("Changes found for " + cfg.path \
+ " or checks disabled. Compiling...")
cfg.compile(self) | Main entry point that compiles all the specified or found compass
projects. |
def after_request(self, response):
if response is not None and request is not None:
# When used as response processor, only run if we are requesting
# anything but a static resource.
if request.endpoint in [None, "static"]:
return response
self.compile()
return response | after_request handler for compiling the compass projects with
each request. |
def _check_configs(self):
configs = set(self._find_configs())
known_configs = set(self.configs.keys())
new_configs = configs - known_configs
for cfg in (known_configs - configs):
self.log.debug("Compass configuration has been removed: " + cfg)
del self.configs[cfg]
for cfg in new_configs:
self.log.debug("Found new compass configuration: " + cfg)
self.configs[cfg] = CompassConfig(cfg) | Reloads the configuration files. |
def _find_configs(self):
if self.config_files is not None:
return self.config_files
# Walk the whole project tree and look for "config.rb" files
result = []
for path, _, files in os.walk(self.app.root_path):
if "config.rb" in files:
result.append(os.path.join(path, "config.rb"))
return result | Scans the project directory for config files or returns
the explicitly specified list of files. |
def parse(self, replace=False):
if self.last_parsed is not None \
and self.last_parsed > os.path.getmtime(self.path) \
and not replace:
return
self.last_parsed = time.time()
with open(self.path, 'r') as file_:
for line in file_:
match = CONFIG_LINE_RE.match(line.rstrip())
if match:
if match.group(1) == 'sass_dir':
self.src = os.path.join(
self.base_dir, match.group(2)[1:-1])
elif match.group(1) == 'css_dir':
self.dest = os.path.join(
self.base_dir, match.group(2)[1:-1]) | Parse the given compass config file |
def changes_found(self):
if self.dest is None:
warnings.warn("dest directory not found!")
if self.src is None:
warnings.warn("src directory not found!")
if self.src is None or self.dest is None:
return False
dest_mtime = -1
src_mtime = os.path.getmtime(self.src)
if os.path.exists(self.dest):
dest_mtime = os.path.getmtime(self.dest)
if src_mtime >= dest_mtime:
return True # changes found
for folder, _, files in os.walk(self.src):
for filename in fnmatch.filter(files, '*.scss'):
src_path = os.path.join(folder, filename)
if os.path.getmtime(src_path) >= dest_mtime:
return True
return False | Returns True if the target folder is older than the source folder. |
def compile(self, compass):
try:
output = subprocess.check_output(
[compass.compass_path, 'compile', '-q'],
cwd=self.base_dir)
os.utime(self.dest, None)
compass.log.debug(output)
except OSError as e:
if e.errno == errno.ENOENT:
compass.log.error("Compass could not be found in the PATH " +
"and/or in the COMPASS_PATH setting! " +
"Disabling compilation.")
compass.disabled = True
else:
raise e | Calls the compass script specified in the compass extension
with the paths provided by the config.rb. |
def _remove_otiose(lst):
listtype = type([])
while type(lst) == listtype and len(lst) == 1:
lst = lst[0]
return lst | lift deeply nested expressions out of redundant parentheses |
def _compose_range(pattern, rule, fill=2):
keys = []
mask = len(pattern)
for rule in str.split(rule, ","):
if not '-' in rule:
if rule[:mask] == pattern:
keys.append(rule[mask:])
else:
keys.append(rule)
else:
(start, end) = str.split(rule, '-')
if rule[:mask] == pattern:
start = int(start[mask:])
else:
start = int(start)
# Since I allow both "Week00-15" and "Week00-Week15", I need
# to check for the second week.
if end[0:mask] == pattern:
end = int(end[mask:])
else:
end = int(end)
key = "%%0%ii" % fill
for i in range(start, end + 1):
keys.append(key % i)
#print keys
return keys | oc._compose_range('Week', 'Week04-Week09', fill=2) - hash a range.
This takes apart a range of times and returns a list of
all intervening values appropriately formatted. The fill value is
used to format the time numbers. |
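Worked examples derived from the code above; both spellings of the range yield the same keys:
_compose_range('Week', 'Week04-Week09')   # -> ['04', '05', '06', '07', '08', '09']
_compose_range('Week', 'Week04-09')       # -> same result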
def is_holiday(now=None, holidays="/etc/acct/holidays"):
now = _Time(now)
# Now, parse holiday file.
if not os.path.exists(holidays):
raise Exception("There is no holidays file: %s" % holidays)
f = open(holidays, "r")
# First, read all leading comments.
line = f.readline()
while line[0] == '*': line = f.readline()
# We just got the year line.
(year, primestart, primeend) = str.split(line)
# If not the right year, we have no idea for certain. Skip.
if not year == now.year: return 0
# Now the dates. Check each against now.
while line != '':
# Of course, ignore comments.
if line[0] == '*':
line = f.readline()
continue
try:
# Format: "1/1 New Years Day"
(month, day) = str.split(str.split(line)[0], "/")
# The _Time class has leading-zero padded day numbers.
if len(day) == 1: day = '0' + day
# Get month number from index map (compensate for zero indexing).
month = MONTH_MAP[int(month) - 1]
# Check the date.
#print month, now.month, day, now.day
if month == now.month and day == now.day:
return 1
line = f.readline()
except:
# Skip malformed lines.
line = f.readline()
continue
# If no match found, we must not be in a holiday.
return 0 | is_holiday(now=None, holidays="/etc/acct/holidays") - returns 1 if now falls on a date listed in the holidays file, else 0. |
def flatten(self, lst=None):
tree = []
uops = [] # accumulated unary operations
s = Stack()
group_len = 0 # in current precendence group
for item in lst:
if type(item) == type([]):
# Subexpression.
tree = tree + self.flatten(item)
group_len = group_len + 1
# Unary ops dump, for things like: '!(Monday|Wednesday)'
for uop in uops:
tree.append(uop)
uops = []
elif item in self.ops and item not in self.uops:
# Operator.
if not s.empty():
prev_op = s.pop()
# If the precendence of the previous operation is
# higher then dump out everything so far, ensuring the
# order of evaluation.
if _precedence[prev_op] > _precedence[item]:
s.push(prev_op) # put it back
for i in range(group_len - 1):
tree.append(s.pop())
group_len = 0
else:
s.push(prev_op)
s.push(item)
else:
s.push(item)
elif item in self.uops:
uops.append(item)
else:
# Token of some sort.
tree.append(item)
group_len = group_len + 1
# Dump any unary operations.
for uop in uops:
tree.append(uop)
uops = []
while not s.empty():
tree.append(s.pop())
# Drop any remaining unary operations.
for uop in uops:
tree.append(uop)
return tree | syntax.flatten(token_stream) - compile period tokens
This turns a stream of tokens into p-code for the trivial
stack machine that evaluates period expressions in in_period. |
def get_work_commits(repo_addr, ascending = True, tz = 'US/Eastern', correct_times = True):
repo = git.Repo(repo_addr)
commits = list(repo.iter_commits())
logs = [(c.authored_datetime, c.message.strip('\n'), str(c)) for c in repo.iter_commits()]
work = pd.DataFrame.from_records(logs, columns = ['time', 'message', 'hash'])
work.time = pd.DatetimeIndex([pd.Timestamp(i).tz_convert(tz) for i in work.time])
work.set_index('time', inplace = True)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
work = work.sort_index(ascending = ascending)
if correct_times:
work = adjust_time(work)
return work, repo | Retrieves work commits from a repo |
def get_topic_set(file_path):
topic_set = set()
file_row_gen = get_file_row_generator(file_path, ",") # The separator here is irrelevant.
for file_row in file_row_gen:
topic_set.add(file_row[0])
return topic_set | Opens one of the topic set resource files and returns a set of topics.
- Input: - file_path: The path pointing to the topic set resource file.
- Output: - topic_set: A python set of strings. |
def get_reveal_set():
file_path = get_package_path() + "/twitter/res/topics/story_set.txt"
story_topics = get_topic_set(file_path)
file_path = get_package_path() + "/twitter/res/topics/theme_set.txt"
theme_topics = get_topic_set(file_path)
file_path = get_package_path() + "/twitter/res/topics/attribute_set.txt"
attribute_topics = get_topic_set(file_path)
file_path = get_package_path() + "/twitter/res/topics/stance_set.txt"
stance_topics = get_topic_set(file_path)
file_path = get_package_path() + "/twitter/res/topics/geographical_set.txt"
geographical_topics = get_topic_set(file_path)
topics = story_topics | theme_topics | attribute_topics | stance_topics | geographical_topics
return topics | Returns a set of all the topics that are interesting for REVEAL use-cases. |
def get_topic_keyword_dictionary():
topic_keyword_dictionary = dict()
file_row_gen = get_file_row_generator(get_package_path() + "/twitter/res/topics/topic_keyword_mapping" + ".txt",
",",
"utf-8")
for file_row in file_row_gen:
topic_keyword_dictionary[file_row[0]] = set([keyword for keyword in file_row[1:]])
return topic_keyword_dictionary | Opens the topic-keyword map resource file and returns the corresponding python dictionary.
- Input: - file_path: The path pointing to the topic-keyword map resource file.
- Output: - topic_set: A topic to keyword python dictionary. |
def get_surveys(self):
payload = {
'Request': 'getSurveys',
'Format': 'JSON'
}
r = self._session.get(QUALTRICS_URL, params=payload)
output = r.json()
return output['Result']['Surveys'] | Gets all surveys in account
Args:
None
Returns:
list: a list of all surveys |
def get_input(self, name, ds):
columns = self.inputs.get(name)
df = ds.get_dataframe()
# set defaults
for column in columns:
if column not in df.columns:
df[column] = self.defaults.get(column)
return df[columns] | Retrieves the content of an input given a DataSource. The input acts like a filter over the outputs of the DataSource.
Args:
name (str): The name of the input.
ds (openflow.DataSource): The DataSource that will feed the data.
Returns:
pandas.DataFrame: The content of the input. |
def domain_relationship(self):
if self.__domain_relationship is None:
ent = self.relator.get_entity()
self.__domain_relationship = \
self.descriptor.make_relationship(ent)
return self.__domain_relationship | Returns a domain relationship equivalent with this resource
relationship. |
def fit(self):
self._mcmcfit = self.mcmcsetup.run()
self._mcmcfit.burnin(self.burnin)
dmin = min(self._mcmcfit.depth_segments)
dmax = max(self._mcmcfit.depth_segments)
self._thick = (dmax - dmin) / len(self.mcmcfit.depth_segments)
self._depth = np.arange(dmin, dmax + 0.001)
self._age_ensemble = np.array([self.agedepth(d=dx) for dx in self.depth]) | Fit MCMC AgeDepthModel |
def date(self, proxy, how='median', n=500):
assert how in ['median', 'ensemble']
ens_members = self.mcmcfit.n_members()
if how == 'ensemble':
select_idx = np.random.choice(range(ens_members), size=n, replace=True)
out = []
for d in proxy.data.depth.values:
age = self.agedepth(d)
if how == 'median':
age = np.median(age)
elif how == 'ensemble':
age = age[select_idx]
out.append(age)
return DatedProxyRecord(proxy.data.copy(), out) | Date a proxy record
Parameters
----------
proxy : ProxyRecord
how : str
How to perform the dating. 'median' returns the median of the MCMC ensemble. 'ensemble' returns 'n'
randomly selected members of the MCMC ensemble. Default is 'median'.
n : int
If 'how' is 'ensemble', the function will randomly select 'n' MCMC ensemble members, with replacement.
Returns
-------
DatedProxyRecord |
def plot(self, agebins=50, p=(2.5, 97.5), ax=None):
if ax is None:
ax = plt.gca()
ax.hist2d(np.repeat(self.depth, self.age_ensemble.shape[1]), self.age_ensemble.flatten(),
(len(self.depth), agebins), cmin=1)
ax.step(self.depth, self.age_median(), where='mid', color='red')
ax.step(self.depth, self.age_percentile(p[0]), where='mid', color='red', linestyle=':')
ax.step(self.depth, self.age_percentile(p[1]), where='mid', color='red', linestyle=':')
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax | Age-depth plot |
def agedepth(self, d):
# TODO(brews): Function cannot handle hiatus
# See lines 77 - 100 of hist2.cpp
x = self.mcmcfit.sediment_rate
theta0 = self.mcmcfit.headage # Age abscissa (in yrs). If array, dimension should be iterations or realizations of the sediment
deltac = self.thick
c0 = min(self.depth) # Uniform depth segment abscissa (in cm).
assert d > c0 or np.isclose(c0, d, atol = 1e-4)
out = theta0.astype(float)
i = int(np.floor((d - c0) / deltac))
for j in range(i):
out += x[j] * deltac
ci = c0 + i * deltac
assert ci < d or np.isclose(ci, d, atol = 1e-4)
try:
next_x = x[i]
except IndexError:
# Extrapolating
next_x = x[i - 1]
out += next_x * (d - ci)
return out | Get calendar age for a depth
Parameters
----------
d : float
Sediment depth (in cm).
Returns
-------
Numeric giving true age at given depth. |
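The loop above amounts to a piecewise-linear age model. Writing x for the sediment rates, theta_0 for the head age, Delta c for the segment thickness and c_0 for the minimum depth, the returned value is approximately
$$\mathrm{age}(d) = \theta_0 + \sum_{j=0}^{i-1} x_j\,\Delta c + x_i\,(d - c_i), \qquad i = \Big\lfloor \tfrac{d - c_0}{\Delta c} \Big\rfloor,\; c_i = c_0 + i\,\Delta c.$$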
def plot_prior_dates(self, dwidth=30, ax=None):
if ax is None:
ax = plt.gca()
depth, probs = self.prior_dates()
pat = []
for i, d in enumerate(depth):
p = probs[i]
z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize
z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted
zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200)
zp = np.interp(x=zy, xp=z[0], fp=z[1])
pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]),
np.concatenate([zy, zy[::-1]])])
pat.append(Polygon(pol.T))
p = PatchCollection(pat)
p.set_label('Prior dates')
ax.add_collection(p)
ax.autoscale_view()
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax | Plot prior chronology dates in age-depth plot |
def plot_sediment_rate(self, ax=None):
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_rate()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_rate
density = scipy.stats.gaussian_kde(y_posterior.flat)
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
acc_shape = self.mcmcsetup.mcmc_kws['acc_shape']
acc_mean = self.mcmcsetup.mcmc_kws['acc_mean']
annotstr_template = 'acc_shape: {0}\nacc_mean: {1}'
annotstr = annotstr_template.format(acc_shape, acc_mean)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Acc. rate (yr/cm)')
ax.grid(True)
return ax | Plot sediment accumulation rate prior and posterior distributions |
def plot_sediment_memory(self, ax=None):
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_memory()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_memory
density = scipy.stats.gaussian_kde(y_posterior ** (1/self.thick))
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
mem_mean = self.mcmcsetup.mcmc_kws['mem_mean']
mem_strength = self.mcmcsetup.mcmc_kws['mem_strength']
annotstr_template = 'mem_strength: {0}\nmem_mean: {1}\nthick: {2} cm'
annotstr = annotstr_template.format(mem_strength, mem_mean, self.thick)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Memory (ratio)')
ax.grid(True)
return ax | Plot sediment memory prior and posterior distributions |
def qpinfo():
parser = qpinfo_parser()
args = parser.parse_args()
path = pathlib.Path(args.path).resolve()
try:
ds = load_data(path)
except UnknownFileFormatError:
print("Unknown file format: {}".format(path))
return
print("{} ({})".format(ds.__class__.__doc__, ds.__class__.__name__))
print("- number of images: {}".format(len(ds)))
for key in ds.meta_data:
print("- {}: {}".format(key, ds.meta_data[key])) | Print information of a quantitative phase imaging dataset |
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust) | :param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver |
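Minimal connection sketch; the bolt URI and credentials are placeholders, and it assumes the neo4j-driver 1.x API this module targets:
driver = get_db_driver('bolt://localhost:7687', username='neo4j', password='secret')
with driver.session() as session:
    result = session.run('MATCH (n:Node) RETURN count(n) AS c')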
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'] | Creates a node with the mandatory attributes name and handle_id, and also sets the type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node |
def get_node(manager, handle_id, legacy=True):
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id) | :param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node |
def get_node_bundle(manager, handle_id=None, node=None):
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d | :param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict |
def delete_node(manager, handle_id):
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True | Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool |
def get_relationship(manager, relationship_id, legacy=True):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id)) | :param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship |