docstring | function | __index_level_0__
---|---|---|
Verify that the input HDUList is for a waivered FITS file.
Parameters:
waiveredHdul HDUList object to be verified
Returns: None
Exceptions:
ValueError Input HDUList is not for a waivered FITS file
|
def _verify(waiveredHdul):
if len(waiveredHdul) == 2:
#
# There must be exactly 2 HDU's
#
if waiveredHdul[0].header['NAXIS'] > 0:
#
# The Primary HDU must have some data
#
if isinstance(waiveredHdul[1], fits.TableHDU):
#
# The Alternate HDU must be a TableHDU
#
if waiveredHdul[0].data.shape[0] == \
waiveredHdul[1].data.shape[0] or \
waiveredHdul[1].data.shape[0] == 1:
#
# The number of arrays in the Primary HDU must match
# the number of rows in the TableHDU. This includes
# the case where there is only a single array and row.
#
return
#
# Not a valid waivered FITS file
#
raise ValueError("Input object does not represent a valid waivered" + \
" FITS file")
| 808,500 |
Return the current timestamp in machine local time.
Parameters:
-----------
time, date : Boolean
Flag to include the time or date components, respectively,
in the output.
fmt : str, optional
If passed, will override the time/date choice and use as
the format string passed to `strftime`.
|
def get_timestamp(time=True, date=True, fmt=None):
time_format = "%H:%M:%S"
date_format = "%m-%d-%Y"
if fmt is None:
if time and date:
fmt = time_format + " " + date_format
elif time:
fmt = time_format
elif date:
fmt = date_format
else:
raise ValueError("One of `date` or `time` must be True!")
return datetime.now().strftime(fmt)
| 808,887 |
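A minimal usage sketch for get_timestamp (output values are illustrative):
>>> get_timestamp()                 # e.g. '14:05:09 03-18-2024'
>>> get_timestamp(date=False)       # e.g. '14:05:09'
>>> get_timestamp(fmt="%Y-%m-%d")   # e.g. '2024-03-18'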
Extract the list of files from a tar or zip archive.
Args:
filename: name of the archive
Returns:
Sorted list of files in the archive, excluding './'
Raises:
ValueError: when the file is neither a zip nor a tar archive
FileNotFoundError: when the provided file does not exist (for Python 3)
IOError: when the provided file does not exist (for Python 2)
|
def _get_archive_filelist(filename):
# type: (str) -> List[str]
names = [] # type: List[str]
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as tar_file:
names = sorted(tar_file.getnames())
elif zipfile.is_zipfile(filename):
with zipfile.ZipFile(filename) as zip_file:
names = sorted(zip_file.namelist())
else:
raise ValueError("Can not get filenames from '{!s}'. "
"Not a tar or zip file".format(filename))
if "./" in names:
names.remove("./")
return names
| 809,025 |
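A usage sketch for _get_archive_filelist; the archive name and its contents are hypothetical:
>>> _get_archive_filelist('example.tar.gz')
['docs/', 'docs/index.rst', 'setup.py']
>>> _get_archive_filelist('notes.txt')
Traceback (most recent call last):
    ...
ValueError: Can not get filenames from 'notes.txt'. Not a tar or zip file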
Create a new user instance.
Args:
model_class:
The type of model to create an instance of.
args:
Positional arguments to create the instance with.
kwargs:
Keyword arguments to create the instance with.
Returns:
A new user instance of the type specified by
``model_class``.
|
def _create(cls, model_class, *args, **kwargs):
manager = cls._get_manager(model_class)
return manager.create_user(*args, **kwargs)
| 809,283 |
Update the instance the serializer is bound to.
Args:
instance:
The instance the serializer is bound to.
validated_data:
The data to update the serializer with.
Returns:
The updated instance.
|
def update(self, instance, validated_data):
is_primary = validated_data.pop("is_primary", False)
instance = super(EmailSerializer, self).update(
instance, validated_data
)
if is_primary:
instance.set_primary()
return instance
| 809,302 |
Validate the provided email address.
Args:
email:
The email address to validate.
Returns:
The provided email address, transformed to match the RFC
spec. Namely, the domain portion of the email must be
lowercase.
|
def validate_email(self, email):
user, domain = email.rsplit("@", 1)
return "@".join([user, domain.lower()])
| 809,312 |
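Note that only the domain portion is lowercased; the local part is preserved. A quick sketch, assuming `serializer` is an instance of the class defining validate_email:
>>> serializer.validate_email('Jane.Doe@EXAMPLE.COM')
'Jane.Doe@example.com'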
Get a user by their ID.
Args:
user_id:
The ID of the user to fetch.
Returns:
The user with the specified ID if they exist and ``None``
otherwise.
|
def get_user(self, user_id):
try:
return get_user_model().objects.get(id=user_id)
except get_user_model().DoesNotExist:
return None
| 809,414 |
Save the provided data using the class' serializer.
Args:
request:
The request being made.
Returns:
An ``APIResponse`` instance. If the request was successful
the response will have a 200 status code and contain the
serializer's data. Otherwise a 400 status code and the
request's errors will be returned.
|
def post(self, request):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| 809,552 |
Starts listening to events.
Args:
timeout (int): number of seconds before timeout. Used for testing purposes only.
root_object (bambou.NURESTRootObject): NURESTRootObject object that is listening. Used for testing purposes only.
|
def start(self, timeout=None, root_object=None):
if self._is_running:
return
if timeout:
self._timeout = timeout
self._start_time = int(time())
pushcenter_logger.debug("[NURESTPushCenter] Starting push center on url %s ..." % self.url)
self._is_running = True
self.__root_object = root_object
from .nurest_session import NURESTSession
current_session = NURESTSession.get_current_session()
args_session = {'session': current_session}
self._thread = StoppableThread(target=self._listen, name='push-center', kwargs=args_session)
self._thread.daemon = True
self._thread.start()
| 810,626 |
Registers a new delegate callback.
The prototype should be ``function(data)``, where ``data`` will be the decoded JSON push.
Args:
callback (function): method to trigger when push center receives events
|
def add_delegate(self, callback):
if callback in self._delegate_methods:
return
self._delegate_methods.append(callback)
| 810,631 |
Unregisters a registered delegate function or a method.
Args:
callback(function): method to trigger when push center receives events
|
def remove_delegate(self, callback):
if callback not in self._delegate_methods:
return
self._delegate_methods.remove(callback)
| 810,632 |
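A short sketch of the delegate lifecycle, assuming `push_center` is a NURESTPushCenter instance:
def on_push(data):
    print(data)  # `data` is the decoded JSON push

push_center.add_delegate(on_push)     # duplicate registrations are ignored
push_center.remove_delegate(on_push)  # unknown callbacks are ignored too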
Initializes a new sesssion
Args:
username (string): the username
password (string): the password
enterprise (string): the enterprise
api_url (string): the url to the api
version (string): the version of the api to target
Example:
>>> mainsession = NUMySession(username="csproot", password="csproot", enterprise="csp", api_url="https://vsd:8443")
|
def __init__(self, username, password, enterprise, api_url, api_prefix, version, certificate=None):
self._root_object = None
self._login_controller = NURESTLoginController()
self._login_controller.user = username
self._login_controller.password = password
self._login_controller.certificate = certificate
self._login_controller.user_name = username
self._login_controller.enterprise = enterprise
self._login_controller.url = '%s/%s/v%s' % (api_url, api_prefix, str(version).replace('.', '_'))
self._push_center = NURESTPushCenter()
self._push_center.url = self._login_controller.url
self.requests_session = requests.Session()
| 810,747 |
Verify if the fetcher contains the given NURESTObject
Args:
nurest_object (bambou.NURESTObject): the NURESTObject object to verify
Returns:
Returns True if the object has been found. False otherwise
|
def __contains__(self, nurest_object):
for obj in self:
if obj.equals(nurest_object):
return True
return False
| 810,980 |
Get index of the given item
Args:
nurest_object (bambou.NURESTObject): the NURESTObject object to verify
Returns:
Returns the position of the object.
Raises:
Raise a ValueError exception if object is not present
|
def index(self, nurest_object):
for index, obj in enumerate(self):
if obj.equals(nurest_object):
return index
raise ValueError("%s is not in %s" % (nurest_object, self))
| 810,981 |
Register the fetcher for a served object.
This method will fill the fetcher with `managed_class` instances
Args:
parent_object: the instance of the parent object to serve
Returns:
It returns the fetcher instance.
|
def fetcher_with_object(cls, parent_object, relationship="child"):
fetcher = cls()
fetcher.parent_object = parent_object
fetcher.relationship = relationship
rest_name = cls.managed_object_rest_name()
parent_object.register_fetcher(fetcher, rest_name)
return fetcher
| 810,982 |
Prepare headers for the given request
Args:
request: the NURESTRequest to send
filter: string
order_by: string
group_by: list of names
page: int
page_size: int
|
def _prepare_headers(self, request, filter=None, order_by=None, group_by=None, page=None, page_size=None):
group_by = group_by or []  # avoid a shared mutable default argument
if filter:
request.set_header('X-Nuage-Filter', filter)
if order_by:
request.set_header('X-Nuage-OrderBy', order_by)
if page is not None:
request.set_header('X-Nuage-Page', str(page))
if page_size:
request.set_header('X-Nuage-PageSize', str(page_size))
if len(group_by) > 0:
header = ", ".join(group_by)
request.set_header('X-Nuage-GroupBy', 'true')
request.set_header('X-Nuage-Attributes', header)
| 810,983 |
Returns the detected dialect of the target CSV file and sets self.has_header
if it was not passed in __init__ kwargs.
Arguments:
filepath (str): filepath of target csv file
|
def _dialect(self, filepath):
with open(filepath, self.read_mode) as csvfile:
sample = csvfile.read(1024)
dialect = csv.Sniffer().sniff(sample)
if self.has_header is None:
# detect header if header not specified
self.has_header = csv.Sniffer().has_header(sample)
csvfile.seek(0)
return dialect
| 811,042 |
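The sniffing itself is plain standard-library csv; a self-contained sketch of what _dialect delegates to:
import csv

sample = 'name,age\nalice,30\nbob,25\n'
dialect = csv.Sniffer().sniff(sample)
print(dialect.delimiter)                 # ','
print(csv.Sniffer().has_header(sample))  # True for this sample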
Initializes a new connection for a given request
NURESTConnection object is in charge of the HTTP call. It relies on the requests library
Args:
request: the NURESTRequest to send
callback: the method that will be fired after sending
callbacks: a dictionary of user callbacks. Should contain local and remote callbacks
|
def __init__(self, request, async, callback=None, callbacks=dict(), root_object=None):
self._uses_authentication = True
self._has_timeouted = False
# self._is_cancelled = False
self._ignore_request_idle = False
self._xhr_timeout = 3000
self._response = None
self._error_message = None
self._transaction_id = uuid.uuid4().hex
self._request = request
self._async = async
self._callback = callback
self._callbacks = callbacks
self._user_info = None
self._object_last_action_timer = None
self._root_object = root_object
| 811,082 |
Initializes the object with general information.
Attributes (set on the instance rather than passed as arguments):
creation_date: float representing time since epoch
id: identifier of the object
local_id: internal identifier of the object
owner: string representing the owner
parent_id: identifier of the object's parent
parent_type: type of the parent
|
def __init__(self):
self._local_id = str(uuid4())
self._creation_date = None
self._last_updated_date = None
self._id = None
self._owner = None
self._parent_id = None
self._parent_type = None
self._parent = None
self._is_dirty = False
self._attribute_errors = dict()
self._attributes = dict()
self.expose_attribute(local_name='id', remote_name=BambouConfig.get_id_remote_name(), attribute_type=BambouConfig.get_id_type(), is_identifier=True)
self.expose_attribute(local_name='parent_id', remote_name='parentID', attribute_type=str)
self.expose_attribute(local_name='parent_type', remote_name='parentType', attribute_type=str)
self.expose_attribute(local_name='creation_date', remote_name='creationDate', attribute_type=float, is_editable=False)
self.expose_attribute(local_name='last_updated_date', remote_name='lastUpdatedDate', attribute_type=float, is_editable=False)
self.expose_attribute(local_name='owner', attribute_type=str, is_readonly=True)
self._fetchers_registry = dict()
| 811,162 |
Compute the arguments.
Attributes are first set from ``data`` when provided;
any remaining keyword arguments are then applied.
Args:
data: a dict()
kwargs: a list of arguments
|
def _compute_args(self, data=dict(), **kwargs):
for name, remote_attribute in self._attributes.items():
default_value = BambouConfig.get_default_attribute_value(self.__class__, name, remote_attribute.attribute_type)
setattr(self, name, default_value)
if len(data) > 0:
self.from_dict(data)
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
| 811,163 |
Sets all the exposed REST attributes from the given dictionary
Args:
dictionary (dict): dictionary containing the raw object attributes and their values.
Example:
>>> info = {"name": "my group", "private": False}
>>> group = NUGroup()
>>> group.from_dict(info)
>>> print "name: %s - private: %s" % (group.name, group.private)
"name: my group - private: False"
|
def from_dict(self, dictionary):
for remote_name, remote_value in dictionary.items():
# Check if a local attribute is exposed with the remote_name
# if no attribute is exposed, return None
local_name = next((name for name, attribute in self._attributes.items() if attribute.remote_name == remote_name), None)
if local_name:
setattr(self, local_name, remote_value)
else:
# print('Attribute %s could not be added to object %s' % (remote_name, self))
pass
| 811,179 |
Delete object and call given callback in case of asynchronous call.
Args:
response_choice (int): Automatically send a response choice when confirmation is needed
async (bool): Boolean to make an asynchronous call. Default is False
callback (function): Callback method that will be triggered in case of asynchronous call
Example:
>>> entity.delete() # will delete the enterprise from the server
|
def delete(self, response_choice=1, async=False, callback=None):
return self._manage_child_object(nurest_object=self, method=HTTP_METHOD_DELETE, async=async, callback=callback, response_choice=response_choice)
| 811,180 |
Update object and call given callback in case of async call
Args:
response_choice (int): Automatically send a response choice when confirmation is needed
async (bool): Boolean to make an asynchronous call. Default is False
callback (function): Callback method that will be triggered in case of asynchronous call
Example:
>>> entity.name = "My Super Object"
>>> entity.save() # will save the new name in the server
|
def save(self, response_choice=None, async=False, callback=None):
return self._manage_child_object(nurest_object=self, method=HTTP_METHOD_PUT, async=async, callback=callback, response_choice=response_choice)
| 811,181 |
Set API URL endpoint
Args:
url: the url of the API endpoint
|
def url(self, url):
if url and url.endswith('/'):
url = url[:-1]
self._url = url
| 811,257 |
Impersonate a user in an enterprise
Args:
user: the name of the user to impersonate
enterprise: the name of the enterprise where to use impersonation
|
def impersonate(self, user, enterprise):
if not user or not enterprise:
raise ValueError('You must set a user name and an enterprise name to begin impersonation')
self._is_impersonating = True
self._impersonation = "%s@%s" % (user, enterprise)
| 811,260 |
Register a model class according to its remote name
Args:
model: the model to register
|
def register_model(cls, model):
rest_name = model.rest_name
resource_name = model.resource_name
if rest_name not in cls._model_rest_name_registry:
cls._model_rest_name_registry[rest_name] = [model]
cls._model_resource_name_registry[resource_name] = [model]
elif model not in cls._model_rest_name_registry[rest_name]:
cls._model_rest_name_registry[rest_name].append(model)
cls._model_resource_name_registry[resource_name].append(model)
| 811,342 |
Get the first model corresponding to a rest_name
Args:
rest_name: the rest name
|
def get_first_model_with_rest_name(cls, rest_name):
models = cls.get_models_with_rest_name(rest_name)
if len(models) > 0:
return models[0]
return None
| 811,343 |
Get the first model corresponding to a resource_name
Args:
resource_name: the resource name
|
def get_first_model_with_resource_name(cls, resource_name):
models = cls.get_models_with_resource_name(resource_name)
if len(models) > 0:
return models[0]
return None
| 811,344 |
Initializes a BambouHTTPError
Args:
connection: the Connection object
|
def __init__(self, connection):
self.connection = connection
response = connection.response
super(BambouHTTPError, self).__init__("[HTTP %s(%s)] %s" % (response.status_code, response.reason, response.errors))
| 811,714 |
Create a child space from a module.
Args:
module: a module object or name of the module object.
recursive: Not yet implemented.
**params: arguments to pass to ``new_space``
Returns:
The new child space created from the module.
|
def import_module(self, module=None, recursive=False, **params):
if module is None:
if "module_" in params:
warnings.warn(
"Parameter 'module_' is deprecated. Use 'module' instead.")
module = params.pop("module_")
else:
raise ValueError("no module specified")
if "bases" in params:
params["bases"] = get_impls(params["bases"])
space = (
self._impl.model.currentspace
) = self._impl.new_space_from_module(
module, recursive=recursive, **params
)
return get_interfaces(space)
| 811,994 |
Create a child space from a module.
Alias to :py:meth:`import_module`.
Args:
module: a module object or name of the module object.
recursive: Not yet implemented.
**params: arguments to pass to ``new_space``
Returns:
The new child space created from the module.
|
def new_space_from_module(self, module, recursive=False, **params):
if "bases" in params:
params["bases"] = get_impls(params["bases"])
space = (
self._impl.model.currentspace
) = self._impl.new_space_from_module(
module, recursive=recursive, **params
)
return get_interfaces(space)
| 811,995 |
Display the model tree window.
Args:
model: :class:`Model <modelx.core.model.Model>` object.
Defaults to the current model.
Warnings:
For this function to work with Spyder, *Graphics backend* option
of Spyder must be set to *inline*.
|
def show_tree(model=None):
if model is None:
model = mx.cur_model()
view = get_modeltree(model)
app = QApplication.instance()
if not app:
raise RuntimeError("QApplication does not exist.")
view.show()
app.exec_()
| 812,090 |
Construct a CarbonClientService.
Args:
reactor: The Twisted reactor for your application.
hostname: The hostname of your Carbon server.
port: The port that the Carbon pickle endpoint is listening on.
|
def __init__(self, reactor, hostname, port):
service.MultiService.__init__(self)
self._reactor = reactor
self._hostname = hostname
self._port = port
self._client_factory = None
self._tcp_client = None
self._repeating_metric_handles = []
| 812,199 |
Record a single hit on a given metric.
Args:
metric_name: The name of the metric to record with Carbon.
metric_value: The value to record with Carbon.
epoch_seconds: Optionally specify the time for the metric hit.
Returns:
None
|
def publish_metric(self, metric_name, metric_value, epoch_seconds=None):
if epoch_seconds is None:
epoch_seconds = self._reactor.seconds()
self._client_factory.publish_metric(metric_name, metric_value, int(epoch_seconds))
| 812,200 |
Record hits to a metric at a specified interval.
Args:
metric_name: The name of the metric to record with Carbon.
frequency: The frequency with which to poll the getter and record the value with Carbon.
getter: A function which takes no arguments and returns the value to record with Carbon.
Returns:
RepeatingMetricHandle instance. Call .stop() on it to stop recording the metric.
|
def register_repeating_metric(self, metric_name, frequency, getter):
loop = task.LoopingCall(self._publish_repeating_metric, metric_name, getter)
repeating_metric_handle = RepeatingMetricHandle(loop, frequency)
self._repeating_metric_handles.append(repeating_metric_handle)
if self.running:
repeating_metric_handle.start()
return repeating_metric_handle
| 812,201 |
Set up integration.
Registers Pyblish for Maya plug-ins and appends an item to the File-menu.
Arguments:
console (bool): Display console with GUI
port (int, optional): Port from which to start looking for an
available port to connect with Pyblish QML, default
provided by Pyblish Integration.
menu (bool, optional): Whether to append an item to the File-menu. Defaults to True.
|
def setup(console=False, port=None, menu=True):
if self._has_been_setup:
teardown()
register_plugins()
register_host()
if menu:
add_to_filemenu()
self._has_menu = True
self._has_been_setup = True
print("pyblish: Loaded successfully.")
| 812,204 |
Prevent accidental assignment of existing members
Arguments:
object (object): Parent of new attribute
name (str): Name of new attribute
value (object): Value of new attribute
safe (bool): Whether or not to guarantee that
the new attribute was not overwritten.
Can be set to False under condition that
it is superseded by extensive testing.
|
def _remap(object, name, value, safe=True):
if os.getenv("QT_TESTING") is not None and safe:
# Cannot alter original binding.
if hasattr(object, name):
raise AttributeError("Cannot override existing name: "
"%s.%s" % (object.__name__, name))
# Cannot alter classes of functions
if type(object).__name__ != "module":
raise AttributeError("%s != 'module': Cannot alter "
"anything but modules" % object)
elif hasattr(object, name):
# Keep track of modifications
self.__modified__.append(name)
self.__remapped__.append(name)
setattr(object, name, value)
| 812,266 |
Calculate the Method Resolution Order of bases using the C3 algorithm.
Code modified from
http://code.activestate.com/recipes/577748-calculate-the-mro-of-a-class/
Args:
bases: sequence of direct base spaces.
Returns:
mro as a list of bases including node itself
|
def get_mro(self, space):
seqs = [self.get_mro(base) for base
in self.get_bases(space)] + [list(self.get_bases(space))]
res = []
while True:
non_empty = list(filter(None, seqs))
if not non_empty:
# Nothing left to process, we're done.
res.insert(0, space)
return res
for seq in non_empty: # Find merge candidates among seq heads.
candidate = seq[0]
not_head = [s for s in non_empty if candidate in s[1:]]
if not_head:
# Reject the candidate.
candidate = None
else:
break
if not candidate:
raise TypeError(
"inconsistent hierarchy, no C3 MRO is possible")
res.append(candidate)
for seq in non_empty:
# Remove candidate.
if seq[0] == candidate:
del seq[0]
| 812,287 |
Create a new code object by altering some of ``code``'s attributes
Args:
code: code object
attrs: a mapping of names of code object attrs to their values
|
def _alter_code(code, **attrs):
PyCode_New = ctypes.pythonapi.PyCode_New
PyCode_New.argtypes = (
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.c_int,
ctypes.py_object)
PyCode_New.restype = ctypes.py_object
args = [
[code.co_argcount, 'co_argcount'],
[code.co_kwonlyargcount, 'co_kwonlyargcount'],
[code.co_nlocals, 'co_nlocals'],
[code.co_stacksize, 'co_stacksize'],
[code.co_flags, 'co_flags'],
[code.co_code, 'co_code'],
[code.co_consts, 'co_consts'],
[code.co_names, 'co_names'],
[code.co_varnames, 'co_varnames'],
[code.co_freevars, 'co_freevars'],
[code.co_cellvars, 'co_cellvars'],
[code.co_filename, 'co_filename'],
[code.co_name, 'co_name'],
[code.co_firstlineno, 'co_firstlineno'],
[code.co_lnotab, 'co_lnotab']]
for arg in args:
if arg[1] in attrs:
arg[0] = attrs[arg[1]]
return PyCode_New(
args[0][0], # code.co_argcount,
args[1][0], # code.co_kwonlyargcount,
args[2][0], # code.co_nlocals,
args[3][0], # code.co_stacksize,
args[4][0], # code.co_flags,
args[5][0], # code.co_code,
args[6][0], # code.co_consts,
args[7][0], # code.co_names,
args[8][0], # code.co_varnames,
args[9][0], # code.co_freevars,
args[10][0], # code.co_cellvars,
args[11][0], # code.co_filename,
args[12][0], # code.co_name,
args[13][0], # code.co_firstlineno,
args[14][0])
| 812,330 |
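A usage sketch for _alter_code; the 15-argument PyCode_New call above is only valid on CPython 3.x releases where that signature matches, so this assumes such an interpreter, and the patched values are hypothetical:
def greet():
    return 'hi'

# Patch the code object's name and reported filename.
greet.__code__ = _alter_code(greet.__code__, co_name='greet_renamed', co_filename='<patched>')
print(greet())  # still 'hi'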
Create a new cells object in the space.
Args:
name: If omitted, the cells is named automatically ``CellsN``,
where ``N`` is an available number.
formula: The function to define the formula of the cells.
Returns:
The new cells.
|
def new_cells(self, name=None, formula=None):
# Outside formulas only
return self._impl.new_cells(name, formula).interface
| 812,379 |
Check if item is in the space.
item can be either a cells or space.
Args:
item: a cells or space to check.
Returns:
True if item is a direct child of the space, False otherwise.
|
def __contains__(self, item):
if isinstance(item, str):
return item in self._impl.namespace
elif isinstance(item, Cells):
return item._impl in self._impl.cells.values()
elif isinstance(item, StaticSpace):
return item._impl in self._impl.spaces.values()
else:
return False
| 812,382 |
Convert multiple cells to a frame.
If args is an empty sequence, all values are included.
If args is specified, cellsiter must have shareable parameters.
Args:
cellsiter: A mapping from cells names to CellsImpl objects.
args: A sequence of arguments
|
def cellsiter_to_dataframe(cellsiter, args, drop_allna=True):
from modelx.core.cells import shareable_parameters
if len(args):
indexes = shareable_parameters(cellsiter)
else:
indexes = get_all_params(cellsiter.values())
result = None
for cells in cellsiter.values():
df = cells_to_dataframe(cells, args)
if drop_allna and df.isnull().all().all():
continue # Ignore all NA or empty
if df.index.names != [None]:
if isinstance(df.index, pd.MultiIndex):
if _pd_ver < (0, 20):
df = _reset_naindex(df)
df = df.reset_index()
missing_params = set(indexes) - set(df)
for params in missing_params:
df[params] = np.nan
if result is None:
result = df
else:
try:
result = pd.merge(result, df, how="outer")
except MergeError:
# When no common column exists, i.e. all cells are scalars.
result = pd.concat([result, df], axis=1)
except ValueError:
# When common columns are not coercible (numeric vs object),
# Make the numeric column object type
cols = set(result.columns) & set(df.columns)
for col in cols:
# When only either of them has object dtype
if (
len(
[
str(frame[col].dtype)
for frame in (result, df)
if str(frame[col].dtype) == "object"
]
)
== 1
):
if str(result[col].dtype) == "object":
frame = df
else:
frame = result
frame[[col]] = frame[col].astype("object")
# Try again
result = pd.merge(result, df, how="outer")
if result is None:
return pd.DataFrame()
else:
return result.set_index(indexes) if indexes else result
| 812,427 |
Remove all descendants of (i.e., reachable from) ``source``.
Args:
source: The source node.
clear_source (bool): If True, remove ``source`` itself too.
Returns:
set: The removed nodes.
|
def clear_descendants(self, source, clear_source=True):
desc = nx.descendants(self, source)
if clear_source:
desc.add(source)
self.remove_nodes_from(desc)
return desc
| 812,454 |
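The semantics come straight from networkx; a standalone sketch of what gets removed:
import networkx as nx

g = nx.DiGraph([('a', 'b'), ('b', 'c'), ('x', 'y')])
sorted(nx.descendants(g, 'a'))  # ['b', 'c']; with clear_source=True, 'a' is removed as well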
Calculate the maximum number of items that an option can store in the pool at a given time.
This is to limit the pool size to POOL_SIZE.
Args:
pool (dict): answer pool
num_option (int): total number of options available for the question
item_length (int): the length of the item
Returns:
int: the max number of items that an option can have
|
def get_max_size(pool, num_option, item_length):
max_items = POOL_SIZE / item_length
# existing items plus the space reserved for the minimum size. If an option has only 1 item,
# POOL_OPTION_MIN_SIZE - 1 spaces are reserved.
existing = POOL_OPTION_MIN_SIZE * num_option + sum([max(0, len(pool.get(i, {})) - 5) for i in xrange(num_option)])
return int(max_items - existing)
| 813,468 |
This validator checks if the answers include all possible options
Args:
answers (str): the answers to be checked
options (dict): all options that should exist in the answers
algo (str): selection algorithm
Returns:
None if everything is good. Otherwise, the missing option error message.
|
def validate_seeded_answers_simple(answers, options, algo):
seen_options = {}
for answer in answers:
if answer:
key = options[answer['answer']].get('text')
if options[answer['answer']].get('image_url'):
key += options[answer['answer']].get('image_url')
seen_options.setdefault(key, 0)
seen_options[key] += 1
missing_options = []
index = 1
for option in options:
key = option.get('text') + option.get('image_url') if option.get('image_url') else option.get('text')
if option.get('text') != 'n/a':
if seen_options.get(key, 0) == 0:
missing_options.append(_('Option ') + str(index))
index += 1
if missing_options:
return {'seed_error': _('Missing option seed(s): ') + ', '.join(missing_options)}
return None
| 813,472 |
Get answers from others with simple algorithm, which picks one answer for each option.
Args:
see `get_other_answers`
num_responses (int): the number of responses to be returned. This value may not be
respected if there is not enough answers to return
Returns:
dict: answers based on the selection algorithm
|
def get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses):
ret = []
# clean up answers so that all keys are int
pool = {int(k): v for k, v in pool.items()}
total_in_pool = len(seeded_answers)
merged_pool = convert_seeded_answers(seeded_answers)
student_id = get_student_item_dict()['student_id']
# merge the dictionaries in the answer dictionary
for key in pool:
total_in_pool += len(pool[key])
# if student_id has value, we assume the student just submitted an answer. So removing it
# from total number in the pool
if student_id in pool[key].keys():
total_in_pool -= 1
if key in merged_pool:
merged_pool[key].update(pool[key].items())
else:
merged_pool[key] = pool[key]
# remember which option+student_id is selected, so that we don't have duplicates in the result
selected = []
# loop until we have enough answers to return
while len(ret) < min(num_responses, total_in_pool):
for option, students in merged_pool.items():
student = student_id
i = 0
while (student == student_id or (str(option) + student) in selected) and i < 100:
# retry until we get a different, not-yet-selected answer, or give up after 100 retries.
# We are supposed to get a different student answer or a seeded one in a few tries,
# as we have at least one seeded answer for each option in the algo, so the retry cap
# only exists as a safeguard to break the loop.
student = random.choice(students.keys())
i += 1
selected.append(str(option)+student)
if student.startswith('seeded'):
# seeded answer, get the rationale from local
rationale = students[student]
else:
student_item = get_student_item_dict(student)
submission = sas_api.get_answers_for_student(student_item)
rationale = submission.get_rationale(0)
ret.append({'option': option, 'rationale': rationale})
# check if we have enough answers
if len(ret) >= min(num_responses, total_in_pool):
break
return {"answers": ret}
| 813,475 |
Get answers from others with random algorithm, which randomly select answer from the pool.
Student may get three answers for option 1 or one answer for option 1 and two answers for option 2.
Args:
see `get_other_answers`
num_responses (int): the number of responses to be returned. This value may not be
respected if there is not enough answers to return
Returns:
dict: answers based on the selection algorithm
|
def get_other_answers_random(pool, seeded_answers, get_student_item_dict, num_responses):
ret = []
# clean up answers so that all keys are int
pool = {int(k): v for k, v in pool.items()}
seeded = {'seeded'+str(index): answer for index, answer in enumerate(seeded_answers)}
merged_pool = seeded.keys()
for key in pool:
merged_pool += pool[key].keys()
# shuffle
random.shuffle(merged_pool)
# get student identifier
student_id = get_student_item_dict()['student_id']
for student in merged_pool:
if len(ret) >= num_responses:
# have enough answers
break
elif student == student_id:
# this is the student's answer so don't return
continue
if student.startswith('seeded'):
option = seeded[student]['answer']
rationale = seeded[student]['rationale']
else:
student_item = get_student_item_dict(student)
submission = sas_api.get_answers_for_student(student_item)
rationale = submission.get_rationale(0)
option = submission.get_vote(0)
ret.append({'option': option, 'rationale': rationale})
return {"answers": ret}
| 813,476 |
Convert seeded answers into the format that can be merged into student answers.
Args:
answers (list): seeded answers
Returns:
dict: seeded answers with student answers format:
{
0: {
'seeded0': 'rationaleA'
},
1: {
'seeded1': 'rationaleB'
}
}
|
def convert_seeded_answers(answers):
converted = {}
for index, answer in enumerate(answers):
converted.setdefault(answer['answer'], {})
converted[answer['answer']]['seeded' + str(index)] = answer['rationale']
return converted
| 813,477 |
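A worked example of convert_seeded_answers, following the format shown in the docstring:
>>> convert_seeded_answers([
...     {'answer': 0, 'rationale': 'rationaleA'},
...     {'answer': 1, 'rationale': 'rationaleB'},
... ])
{0: {'seeded0': 'rationaleA'}, 1: {'seeded1': 'rationaleB'}}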
Tokenize a text, index a term matrix, and build out a graph.
Args:
path (str): The file path.
term_depth (int): Consider the N most frequent terms.
skim_depth (int): Connect each word to the N closest siblings.
d_weights (bool): If true, give "close" nodes low weights.
Returns:
Skimmer: The indexed graph.
|
def build_graph(path, term_depth=1000, skim_depth=10,
d_weights=False, **kwargs):
# Tokenize text.
click.echo('\nTokenizing text...')
t = Text.from_file(path)
click.echo('Extracted %d tokens' % len(t.tokens))
m = Matrix()
# Index the term matrix.
click.echo('\nIndexing terms:')
m.index(t, t.most_frequent_terms(term_depth), **kwargs)
g = Skimmer()
# Construct the network.
click.echo('\nGenerating graph:')
g.build(t, m, skim_depth, d_weights)
return g
| 813,748 |
1. For each term in the passed matrix, score its KDE similarity with
all other indexed terms.
2. With the ordered stack of similarities in hand, skim off the top X
pairs and add them as edges.
Args:
text (Text): The source text instance.
matrix (Matrix): An indexed term matrix.
skim_depth (int): The number of siblings for each term.
d_weights (bool): If true, give "close" words low edge weights.
|
def build(self, text, matrix, skim_depth=10, d_weights=False):
for anchor in bar(matrix.keys):
n1 = text.unstem(anchor)
# Heaviest pair scores:
pairs = matrix.anchored_pairs(anchor).items()
for term, weight in list(pairs)[:skim_depth]:
# If edges represent distance, use the complement of the raw
# score, so that similar words are connected by "short" edges.
if d_weights: weight = 1-weight
n2 = text.unstem(term)
# NetworkX does not handle numpy types when writing graphml,
# so we cast the weight to a regular float.
self.graph.add_edge(n1, n2, weight=float(weight))
| 813,750 |
Create a ColorPicker dialog.
Arguments:
* parent: parent window
* color: initially selected color in rgb or hexa format
* alpha: alpha channel support (boolean)
* title: dialog title
|
def __init__(self, parent=None, color=(255, 0, 0), alpha=False,
title=_("Color Chooser")):
tk.Toplevel.__init__(self, parent)
self.title(title)
self.transient(self.master)
self.resizable(False, False)
self.rowconfigure(1, weight=1)
self.color = ""
self.alpha_channel = bool(alpha)
style = ttk.Style(self)
style.map("palette.TFrame", relief=[('focus', 'sunken')],
bordercolor=[('focus', "#4D4D4D")])
self.configure(background=style.lookup("TFrame", "background"))
if isinstance(color, str):
if re.match(r"^#[0-9A-F]{8}$", color.upper()):
col = hexa_to_rgb(color)
self._old_color = col[:3]
if alpha:
self._old_alpha = col[3]
old_color = color
else:
old_color = color[:7]
elif re.match(r"^#[0-9A-F]{6}$", color.upper()):
self._old_color = hexa_to_rgb(color)
old_color = color
if alpha:
self._old_alpha = 255
old_color += 'FF'
else:
col = self.winfo_rgb(color)
self._old_color = tuple(round2(c * 255 / 65535) for c in col)
args = self._old_color
if alpha:
self._old_alpha = 255
args = self._old_color + (255,)
old_color = rgb_to_hexa(*args)
else:
self._old_color = color[:3]
if alpha:
if len(color) < 4:
color += (255,)
self._old_alpha = 255
else:
self._old_alpha = color[3]
old_color = rgb_to_hexa(*color)
# --- GradientBar
hue = col2hue(*self._old_color)
bar = ttk.Frame(self, borderwidth=2, relief='groove')
self.bar = GradientBar(bar, hue=hue, width=200, highlightthickness=0)
self.bar.pack()
# --- ColorSquare
square = ttk.Frame(self, borderwidth=2, relief='groove')
self.square = ColorSquare(square, hue=hue, width=200, height=200,
color=rgb_to_hsv(*self._old_color),
highlightthickness=0)
self.square.pack()
frame = ttk.Frame(self)
frame.columnconfigure(1, weight=1)
frame.rowconfigure(1, weight=1)
# --- color preview: initial color and currently selected color side by side
preview_frame = ttk.Frame(frame, relief="groove", borderwidth=2)
preview_frame.grid(row=0, column=0, sticky="nw", pady=2)
if alpha:
self._transparent_bg = create_checkered_image(42, 32)
transparent_bg_old = create_checkered_image(42, 32,
(100, 100, 100, 255),
(154, 154, 154, 255))
prev_old = overlay(transparent_bg_old, hexa_to_rgb(old_color))
prev = overlay(self._transparent_bg, hexa_to_rgb(old_color))
self._im_old_color = ImageTk.PhotoImage(prev_old, master=self)
self._im_color = ImageTk.PhotoImage(prev, master=self)
old_color_prev = tk.Label(preview_frame, padx=0, pady=0,
image=self._im_old_color,
borderwidth=0, highlightthickness=0)
self.color_preview = tk.Label(preview_frame, pady=0, padx=0,
image=self._im_color,
borderwidth=0, highlightthickness=0)
else:
old_color_prev = tk.Label(preview_frame, background=old_color[:7],
width=5, highlightthickness=0, height=2,
padx=0, pady=0)
self.color_preview = tk.Label(preview_frame, width=5, height=2,
pady=0, background=old_color[:7],
padx=0, highlightthickness=0)
old_color_prev.bind("<1>", self._reset_preview)
old_color_prev.grid(row=0, column=0)
self.color_preview.grid(row=0, column=1)
# --- palette
palette = ttk.Frame(frame)
palette.grid(row=0, column=1, rowspan=2, sticky="ne")
for i, col in enumerate(PALETTE):
f = ttk.Frame(palette, borderwidth=1, relief="raised",
style="palette.TFrame")
l = tk.Label(f, background=col, width=2, height=1)
l.bind("<1>", self._palette_cmd)
f.bind("<FocusOut>", lambda e: e.widget.configure(relief="raised"))
l.pack()
f.grid(row=i % 2, column=i // 2, padx=2, pady=2)
col_frame = ttk.Frame(self)
# --- hsv
hsv_frame = ttk.Frame(col_frame, relief="ridge", borderwidth=2)
hsv_frame.pack(pady=(0, 4), fill="x")
hsv_frame.columnconfigure(0, weight=1)
self.hue = LimitVar(0, 360, self)
self.saturation = LimitVar(0, 100, self)
self.value = LimitVar(0, 100, self)
s_h = Spinbox(hsv_frame, from_=0, to=360, width=4, name='spinbox',
textvariable=self.hue, command=self._update_color_hsv)
s_s = Spinbox(hsv_frame, from_=0, to=100, width=4,
textvariable=self.saturation, name='spinbox',
command=self._update_color_hsv)
s_v = Spinbox(hsv_frame, from_=0, to=100, width=4, name='spinbox',
textvariable=self.value, command=self._update_color_hsv)
h, s, v = rgb_to_hsv(*self._old_color)
s_h.delete(0, 'end')
s_h.insert(0, h)
s_s.delete(0, 'end')
s_s.insert(0, s)
s_v.delete(0, 'end')
s_v.insert(0, v)
s_h.grid(row=0, column=1, sticky='w', padx=4, pady=4)
s_s.grid(row=1, column=1, sticky='w', padx=4, pady=4)
s_v.grid(row=2, column=1, sticky='w', padx=4, pady=4)
ttk.Label(hsv_frame, text=_('Hue')).grid(row=0, column=0, sticky='e',
padx=4, pady=4)
ttk.Label(hsv_frame, text=_('Saturation')).grid(row=1, column=0, sticky='e',
padx=4, pady=4)
ttk.Label(hsv_frame, text=_('Value')).grid(row=2, column=0, sticky='e',
padx=4, pady=4)
# --- rgb
rgb_frame = ttk.Frame(col_frame, relief="ridge", borderwidth=2)
rgb_frame.pack(pady=4, fill="x")
rgb_frame.columnconfigure(0, weight=1)
self.red = LimitVar(0, 255, self)
self.green = LimitVar(0, 255, self)
self.blue = LimitVar(0, 255, self)
s_red = Spinbox(rgb_frame, from_=0, to=255, width=4, name='spinbox',
textvariable=self.red, command=self._update_color_rgb)
s_green = Spinbox(rgb_frame, from_=0, to=255, width=4, name='spinbox',
textvariable=self.green, command=self._update_color_rgb)
s_blue = Spinbox(rgb_frame, from_=0, to=255, width=4, name='spinbox',
textvariable=self.blue, command=self._update_color_rgb)
s_red.delete(0, 'end')
s_red.insert(0, self._old_color[0])
s_green.delete(0, 'end')
s_green.insert(0, self._old_color[1])
s_blue.delete(0, 'end')
s_blue.insert(0, self._old_color[2])
s_red.grid(row=0, column=1, sticky='e', padx=4, pady=4)
s_green.grid(row=1, column=1, sticky='e', padx=4, pady=4)
s_blue.grid(row=2, column=1, sticky='e', padx=4, pady=4)
ttk.Label(rgb_frame, text=_('Red')).grid(row=0, column=0, sticky='e',
padx=4, pady=4)
ttk.Label(rgb_frame, text=_('Green')).grid(row=1, column=0, sticky='e',
padx=4, pady=4)
ttk.Label(rgb_frame, text=_('Blue')).grid(row=2, column=0, sticky='e',
padx=4, pady=4)
# --- hexa
hexa_frame = ttk.Frame(col_frame)
hexa_frame.pack(fill="x")
self.hexa = ttk.Entry(hexa_frame, justify="center", width=10, name='entry')
self.hexa.insert(0, old_color.upper())
ttk.Label(hexa_frame, text="HTML").pack(side="left", padx=4, pady=(4, 1))
self.hexa.pack(side="left", padx=6, pady=(4, 1), fill='x', expand=True)
# --- alpha
if alpha:
alpha_frame = ttk.Frame(self)
alpha_frame.columnconfigure(1, weight=1)
self.alpha = LimitVar(0, 255, self)
alphabar = ttk.Frame(alpha_frame, borderwidth=2, relief='groove')
self.alphabar = AlphaBar(alphabar, alpha=self._old_alpha, width=200,
color=self._old_color, highlightthickness=0)
self.alphabar.pack()
s_alpha = Spinbox(alpha_frame, from_=0, to=255, width=4,
textvariable=self.alpha, command=self._update_alpha)
s_alpha.delete(0, 'end')
s_alpha.insert(0, self._old_alpha)
alphabar.grid(row=0, column=0, padx=(0, 4), pady=4, sticky='w')
ttk.Label(alpha_frame, text=_('Alpha')).grid(row=0, column=1, sticky='e',
padx=4, pady=4)
s_alpha.grid(row=0, column=2, sticky='w', padx=(4, 6), pady=4)
# --- validation
button_frame = ttk.Frame(self)
ttk.Button(button_frame, text="Ok",
command=self.ok).pack(side="right", padx=10)
ttk.Button(button_frame, text=_("Cancel"),
command=self.destroy).pack(side="right", padx=10)
# --- placement
bar.grid(row=0, column=0, padx=10, pady=(10, 4), sticky='n')
square.grid(row=1, column=0, padx=10, pady=(9, 0), sticky='n')
if alpha:
alpha_frame.grid(row=2, column=0, columnspan=2, padx=10,
pady=(1, 4), sticky='ewn')
col_frame.grid(row=0, rowspan=2, column=1, padx=(4, 10), pady=(10, 4))
frame.grid(row=3, column=0, columnspan=2, pady=(4, 10), padx=10, sticky="new")
button_frame.grid(row=4, columnspan=2, pady=(0, 10), padx=10)
# --- bindings
self.bar.bind("<ButtonRelease-1>", self._change_color, True)
self.bar.bind("<Button-1>", self._unfocus, True)
if alpha:
self.alphabar.bind("<ButtonRelease-1>", self._change_alpha, True)
self.alphabar.bind("<Button-1>", self._unfocus, True)
self.square.bind("<Button-1>", self._unfocus, True)
self.square.bind("<ButtonRelease-1>", self._change_sel_color, True)
self.square.bind("<B1-Motion>", self._change_sel_color, True)
s_red.bind('<FocusOut>', self._update_color_rgb)
s_green.bind('<FocusOut>', self._update_color_rgb)
s_blue.bind('<FocusOut>', self._update_color_rgb)
s_red.bind('<Return>', self._update_color_rgb)
s_green.bind('<Return>', self._update_color_rgb)
s_blue.bind('<Return>', self._update_color_rgb)
s_red.bind('<Control-a>', self._select_all_spinbox)
s_green.bind('<Control-a>', self._select_all_spinbox)
s_blue.bind('<Control-a>', self._select_all_spinbox)
s_h.bind('<FocusOut>', self._update_color_hsv)
s_s.bind('<FocusOut>', self._update_color_hsv)
s_v.bind('<FocusOut>', self._update_color_hsv)
s_h.bind('<Return>', self._update_color_hsv)
s_s.bind('<Return>', self._update_color_hsv)
s_v.bind('<Return>', self._update_color_hsv)
s_h.bind('<Control-a>', self._select_all_spinbox)
s_s.bind('<Control-a>', self._select_all_spinbox)
s_v.bind('<Control-a>', self._select_all_spinbox)
if alpha:
s_alpha.bind('<Return>', self._update_alpha)
s_alpha.bind('<FocusOut>', self._update_alpha)
s_alpha.bind('<Control-a>', self._select_all_spinbox)
self.hexa.bind("<FocusOut>", self._update_color_hexa)
self.hexa.bind("<Return>", self._update_color_hexa)
self.hexa.bind("<Control-a>", self._select_all_entry)
self.hexa.focus_set()
self.wait_visibility()
self.lift()
self.grab_set()
| 813,764 |
Returns the names of everything (books, notes, graphs, etc.) in the project.
Args:
matching (str, optional): if given, only return names with this string in it
workbooks (bool): if True, return workbooks
graphs (bool): if True, return graphs
Returns:
A list of the names of what you requested
|
def pageNames(matching=False,workbooks=True,graphs=True):
# first collect the pages we want
pages=[]
if workbooks:
pages.extend(PyOrigin.WorksheetPages())
if graphs:
pages.extend(PyOrigin.GraphPages())
# then turn them into a list of strings
pages = [x.GetName() for x in pages]
# do our string matching if it's needed
if matching:
pages=[x for x in pages if matching in x]
return pages
| 813,766 |
Returns the type of the page with that name.
If that name doesn't exist, None is returned.
Args:
name (str): name of the page to get the folder from
number (bool): if True, return numbers (i.e., a graph will be 3)
if False, return words where appropriate (i.e, "graph")
Returns:
string of the type of object the page is
|
def getPageType(name,number=False):
if name not in pageNames():
return None
pageType=PyOrigin.Pages(name).GetType()
if number:
return str(pageType)
if pageType==1:
return "matrix"
if pageType==2:
return "book"
if pageType==3:
return "graph"
if pageType==4:
return "layout"
if pageType==5:
return "notes"
| 813,767 |
Prints every page in the project to the console.
Args:
matching (str, optional): if given, only return names with this string in it
|
def listEverything(matching=False):
pages=pageNames()
if matching:
pages=[x for x in pages if matching in x]
for i,page in enumerate(pages):
pages[i]="%s%s (%s)"%(pageFolder(page),page,getPageType(page))
print("\n".join(sorted(pages)))
| 813,768 |
Return sheet names of a book.
Args:
book (str, optional): If a book is given, pull names from
that book. Otherwise, try the active one
Returns:
list of sheet names (typical case).
None if book has no sheets.
False if book doesn't exist.
|
def sheetNames(book=None):
if book:
if book.lower() not in [x.lower() for x in bookNames()]:
return False
else:
book=activeBook()
if not book:
return False
poBook=PyOrigin.WorksheetPages(book)
if not len(poBook):
return None
return [x.GetName() for x in poBook.Layers()]
| 813,769 |
Yield tokens.
Args:
text (str): The original text.
Yields:
dict: The next token.
|
def tokenize(text):
stem = PorterStemmer().stem
tokens = re.finditer('[a-z]+', text.lower())
for offset, match in enumerate(tokens):
# Get the raw token.
unstemmed = match.group(0)
yield { # Emit the token.
'stemmed': stem(unstemmed),
'unstemmed': unstemmed,
'offset': offset
}
| 813,792 |
Sort an ordered dictionary by value, descending.
Args:
d (OrderedDict): An ordered dictionary.
desc (bool): If true, sort desc.
Returns:
OrderedDict: The sorted dictionary.
|
def sort_dict(d, desc=True):
sort = sorted(d.items(), key=lambda x: x[1], reverse=desc)
return OrderedDict(sort)
| 813,793 |
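A quick sketch of sort_dict:
>>> from collections import OrderedDict
>>> d = OrderedDict([('a', 1), ('b', 3), ('c', 2)])
>>> sort_dict(d)
OrderedDict([('b', 3), ('c', 2), ('a', 1)])
>>> sort_dict(d, desc=False)
OrderedDict([('a', 1), ('c', 2), ('b', 3)])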
Yield a sliding window over an iterable.
Args:
seq (iter): The sequence.
n (int): The window width.
Yields:
tuple: The next window.
|
def window(seq, n=2):
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for token in it:
result = result[1:] + (token,)
yield result
| 813,794 |
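A quick sketch of window; sequences shorter than `n` yield nothing:
>>> list(window([1, 2, 3, 4], n=2))
[(1, 2), (2, 3), (3, 4)]
>>> list(window('abc', n=3))
[('a', 'b', 'c')]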
Parse <seeds> element in the UBCPI XBlock's content XML.
Args:
root (lxml.etree.Element): The root of the <seeds> node in the tree.
Returns:
a list of deserialized representation of seeds. E.g.
[{
'answer': 1, # option index starting from one
'rationale': 'This is a seeded answer',
},
{....
}]
Raises:
ValidationError: The XML definition is invalid.
|
def parse_seeds_xml(root):
seeds = []
for seed_el in root.findall('seed'):
seed_dict = dict()
seed_dict['rationale'] = _safe_get_text(seed_el)
if 'option' in seed_el.attrib:
seed_dict['answer'] = int(seed_el.attrib['option']) - 1
else:
raise ValidationError(_('Seed element must have an option attribute.'))
seeds.append(seed_dict)
return seeds
| 813,826 |
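A sketch of parse_seeds_xml on a minimal <seeds> fragment, assuming `_safe_get_text` returns the element's text:
from lxml import etree

root = etree.fromstring('<seeds><seed option="1">This is a seeded answer</seed></seeds>')
parse_seeds_xml(root)
# [{'rationale': 'This is a seeded answer', 'answer': 0}]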
Update the UBCPI XBlock's content from an XML definition.
We need to be strict about the XML we accept, to avoid setting
the XBlock to an invalid state (which will then be persisted).
Args:
root (lxml.etree.Element): The XML definition of the XBlock's content.
Returns:
A dictionary of all of the XBlock's content.
Raises:
UpdateFromXmlError: The XML definition is invalid
|
def parse_from_xml(root):
# Check that the root has the correct tag
if root.tag != 'ubcpi':
raise UpdateFromXmlError(_('Every peer instruction tool must contain an "ubcpi" element.'))
display_name_el = root.find('display_name')
if display_name_el is None:
raise UpdateFromXmlError(_('Every peer instruction tool must contain a "display_name" element.'))
else:
display_name = _safe_get_text(display_name_el)
rationale_size_min = int(root.attrib['rationale_size_min']) if 'rationale_size_min' in root.attrib else None
rationale_size_max = int(root.attrib['rationale_size_max']) if 'rationale_size_max' in root.attrib else None
question_el = root.find('question')
if question_el is None:
raise UpdateFromXmlError(_('Every peer instruction tool must contain a "question" element.'))
else:
question = parse_question_xml(question_el)
options_el = root.find('options')
if options_el is None:
raise UpdateFromXmlError(_('Every peer instruction tool must contain an "options" element.'))
else:
options, correct_answer, correct_rationale = parse_options_xml(options_el)
seeds_el = root.find('seeds')
if seeds_el is None:
raise UpdateFromXmlError(_('Every peer instruction tool must contain a "seeds" element.'))
else:
seeds = parse_seeds_xml(seeds_el)
algo = unicode(root.attrib['algorithm']) if 'algorithm' in root.attrib else None
num_responses = unicode(root.attrib['num_responses']) if 'num_responses' in root.attrib else None
return {
'display_name': display_name,
'question_text': question,
'options': options,
'rationale_size': {'min': rationale_size_min, 'max': rationale_size_max},
'correct_answer': correct_answer,
'correct_rationale': correct_rationale,
'seeds': seeds,
'algo': {"name": algo, 'num_responses': num_responses}
}
| 813,827 |
Serialize the options in peer instruction XBlock to xml
Args:
options (lxml.etree.Element): The <options> XML element.
block (PeerInstructionXBlock): The XBlock with configuration to serialize.
Returns:
None
|
def serialize_options(options, block):
for index, option_dict in enumerate(block.options):
option = etree.SubElement(options, 'option')
# set correct option and rationale
if index == block.correct_answer:
option.set('correct', u'True')
if hasattr(block, 'correct_rationale'):
rationale = etree.SubElement(option, 'rationale')
rationale.text = block.correct_rationale['text']
text = etree.SubElement(option, 'text')
text.text = option_dict.get('text', '')
serialize_image(option_dict, option)
| 813,828 |
Serialize the seeds in peer instruction XBlock to xml
Args:
seeds (lxml.etree.Element): The <seeds> XML element.
block (PeerInstructionXBlock): The XBlock with configuration to serialize.
Returns:
None
|
def serialize_seeds(seeds, block):
for seed_dict in block.seeds:
seed = etree.SubElement(seeds, 'seed')
# options in xml start with 1
seed.set('option', unicode(seed_dict.get('answer', 0) + 1))
seed.text = seed_dict.get('rationale', '')
| 813,830 |
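A sketch of the XML that serialize_seeds produces, for a hypothetical block with a single seed:
seeds = etree.Element('seeds')
block.seeds = [{'answer': 0, 'rationale': 'Because A'}]  # hypothetical data
serialize_seeds(seeds, block)
etree.tostring(seeds)
# '<seeds><seed option="1">Because A</seed></seeds>'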
Serialize the Peer Instruction XBlock's content to XML.
Args:
block (PeerInstructionXBlock): The peer instruction block to serialize.
root (etree.Element): The XML root node to update.
Returns:
None. The passed ``root`` element is updated in place.
|
def serialize_to_xml(root, block):
root.tag = 'ubcpi'
if block.rationale_size is not None:
if block.rationale_size.get('min'):
root.set('rationale_size_min', unicode(block.rationale_size.get('min')))
if block.rationale_size.get('max'):
root.set('rationale_size_max', unicode(block.rationale_size['max']))
if block.algo:
if block.algo.get('name'):
root.set('algorithm', block.algo.get('name'))
if block.algo.get('num_responses'):
root.set('num_responses', unicode(block.algo.get('num_responses')))
display_name = etree.SubElement(root, 'display_name')
display_name.text = block.display_name
question = etree.SubElement(root, 'question')
question_text = etree.SubElement(question, 'text')
question_text.text = block.question_text['text']
serialize_image(block.question_text, question)
options = etree.SubElement(root, 'options')
serialize_options(options, block)
seeds = etree.SubElement(root, 'seeds')
serialize_seeds(seeds, block)
| 813,831 |
Set the value for a pair of terms.
Args:
term1 (str)
term2 (str)
value (mixed)
|
def set_pair(self, term1, term2, value, **kwargs):
key = self.key(term1, term2)
self.keys.update([term1, term2])
self.pairs[key] = value
| 813,858 |
Get the value for a pair of terms.
Args:
term1 (str)
term2 (str)
Returns:
The stored value.
|
def get_pair(self, term1, term2):
key = self.key(term1, term2)
return self.pairs.get(key, None)
| 813,859 |
Index all term pair distances.
Args:
text (Text): The source text.
terms (list): Terms to index.
|
def index(self, text, terms=None, **kwargs):
self.clear()
# By default, use all terms.
terms = terms or text.terms.keys()
pairs = combinations(terms, 2)
count = comb(len(terms), 2)
for t1, t2 in bar(pairs, expected_size=count, every=1000):
# Set the Bray-Curtis distance.
score = text.score_braycurtis(t1, t2, **kwargs)
self.set_pair(t1, t2, score)
| 813,860 |
Get distances between an anchor term and all other terms.
Args:
anchor (str): The anchor term.
Returns:
OrderedDict: The distances, in descending order.
|
def anchored_pairs(self, anchor):
pairs = OrderedDict()
for term in self.keys:
score = self.get_pair(anchor, term)
if score: pairs[term] = score
return utils.sort_dict(pairs)
| 813,861 |
Create a text from a file.
Args:
path (str): The file path.
|
def from_file(cls, path):
with open(path, 'r', errors='replace') as f:
return cls(f.read())
| 813,898 |
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
|
def __init__(self, text, stopwords=None):
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
| 813,899 |
Load a set of stopwords.
Args:
path (str): The stopwords file path.
|
def load_stopwords(self, path):
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
| 813,900 |
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
|
def most_frequent_terms(self, depth):
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
| 813,904 |
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
|
def unstem(self, term):
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
| 813,905 |
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
|
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evenly-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
| 813,906 |
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
|
def score_intersect(self, term1, term2, **kwargs):
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
| 813,907 |
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
|
def score_cosine(self, term1, term2, **kwargs):
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
| 813,908 |
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
|
def score_braycurtis(self, term1, term2, **kwargs):
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
| 813,909 |
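The underlying distance comes from scipy; identical density curves give distance 0 (score 1), disjoint curves give distance 1 (score 0):
from scipy.spatial import distance

distance.braycurtis([1, 0, 0], [1, 0, 0])  # 0.0
distance.braycurtis([1, 0, 0], [0, 1, 0])  # 1.0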
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
|
def plot_term_kdes(self, words, **kwargs):
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
| 813,910 |
Truncates the rationale for analytics event emission if necessary
Args:
rationale (string): the string value of the rationale
max_length (int): the max length for truncation
Returns:
truncated_value (string): the possibly truncated version of the rationale
was_truncated (bool): returns true if the rationale is truncated
|
def truncate_rationale(rationale, max_length=MAX_RATIONALE_SIZE_IN_EVENT):
    if isinstance(rationale, basestring) and max_length is not None and len(rationale) > max_length:
        return rationale[:max_length], True
    return rationale, False
| 813,951 |
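An illustrative call, assuming the Python 2 context (basestring) of the function above; the max_length values are arbitrary.

text = 'because the pressure gradient reverses'
print(truncate_rationale(text, max_length=10))    # ('because th', True)
print(truncate_rationale(text, max_length=None))  # (text unchanged, False)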
Create a student_item_dict from our surrounding context.
See also: submissions.api for details.
Args:
anonymous_user_id(str): A unique anonymous_user_id for (user, course) pair.
Returns:
(dict): The student item associated with this XBlock instance. This
includes the student id, item id, and course id.
|
def get_student_item_dict(self, anonymous_user_id=None):
item_id = self._serialize_opaque_key(self.scope_ids.usage_id)
# This is not the real way course_ids should work, but this is a
# temporary expediency for LMS integration
if hasattr(self, "xmodule_runtime"):
course_id = self.get_course_id() # pylint:disable=E1101
if anonymous_user_id:
student_id = anonymous_user_id
else:
student_id = self.xmodule_runtime.anonymous_student_id # pylint:disable=E1101
else:
course_id = "edX/Enchantment_101/April_1"
if self.scope_ids.user_id is None:
student_id = ''
else:
student_id = unicode(self.scope_ids.user_id)
student_item_dict = dict(
student_id=student_id,
item_id=item_id,
course_id=course_id,
item_type='ubcpi'
)
return student_item_dict
| 813,953 |
Retrieve answers from backend for a student and question
Args:
student_item (dict): The location of the problem this submission is
associated with, as defined by a course, student, and item.
Returns:
Answers: answers for the student
|
def get_answers_for_student(student_item):
submissions = sub_api.get_submissions(student_item)
if not submissions:
return Answers()
latest_submission = submissions[0]
latest_answer_item = latest_submission.get('answer', {})
return Answers(latest_answer_item.get(ANSWER_LIST_KEY, []))
| 813,954 |
Add an answer for a student to the backend
Args:
student_item (dict): The location of the problem this submission is
associated with, as defined by a course, student, and item.
vote (int): the option that student voted for
rationale (str): the reason why the student voted for the option
|
def add_answer_for_student(student_item, vote, rationale):
answers = get_answers_for_student(student_item)
answers.add_answer(vote, rationale)
sub_api.create_submission(student_item, {
ANSWER_LIST_KEY: answers.get_answers_as_list()
})
| 813,955 |
Get answer data (vote or rationale) by revision
Args:
revision (int): the revision number for the student answer, can be
0 (original) or 1 (revised)
key (str): key for retrieving the answer data, can be VOTE_KEY or
RATIONALE_KEY
Returns:
the answer data, or None if the revision doesn't exist
|
def _safe_get(self, revision, key):
    if self.has_revision(revision):
        return self.raw_answers[revision].get(key)
    return None
| 813,956 |
Add an answer
Args:
vote (int): the option that student voted for
rationale (str): the reason why the student voted for the option
|
def add_answer(self, vote, rationale):
self.raw_answers.append({
VOTE_KEY: vote,
RATIONALE_KEY: rationale,
})
| 813,957 |
Read valid locations from HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
List[Dict]: A list of valid locations
|
def validlocations(configuration=None):
# type: (Optional[Configuration]) -> List[Dict]
if Locations._validlocations is None:
if configuration is None:
configuration = Configuration.read()
Locations._validlocations = configuration.call_remoteckan('group_list', {'all_fields': True})
return Locations._validlocations
| 814,070 |
Get location from HDX location code
Args:
code (str): code for which to get location name
locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[str]: location name
|
def get_location_from_HDX_code(code, locations=None, configuration=None):
# type: (str, Optional[List[Dict]], Optional[Configuration]) -> Optional[str]
if locations is None:
locations = Locations.validlocations(configuration)
for locdict in locations:
if code.upper() == locdict['name'].upper():
return locdict['title']
return None
| 814,071 |
Get HDX code for location
Args:
location (str): Location for which to get HDX code
locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[str]: HDX code or None
|
def get_HDX_code_from_location(location, locations=None, configuration=None):
# type: (str, Optional[List[Dict]], Optional[Configuration]) -> Optional[str]
if locations is None:
locations = Locations.validlocations(configuration)
locationupper = location.upper()
for locdict in locations:
locationcode = locdict['name'].upper()
if locationupper == locationcode:
return locationcode
for locdict in locations:
if locationupper == locdict['title'].upper():
return locdict['name'].upper()
return None
| 814,072 |
Get HDX code for location, falling back to a partial match on the location name
Args:
location (str): Location for which to get HDX code
locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Tuple[Optional[str], bool]: HDX code and whether the match is exact, or (None, False) for no match
|
def get_HDX_code_from_location_partial(location, locations=None, configuration=None):
# type: (str, Optional[List[Dict]], Optional[Configuration]) -> Tuple[Optional[str], bool]
hdx_code = Locations.get_HDX_code_from_location(location, locations, configuration)
if hdx_code is not None:
return hdx_code, True
if locations is None:
locations = Locations.validlocations(configuration)
locationupper = location.upper()
for locdict in locations:
locationname = locdict['title'].upper()
if locationupper in locationname or locationname in locationupper:
return locdict['name'].upper(), False
return None, False
| 814,073 |
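Because every lookup accepts a locations list, the three functions above can be exercised offline against a hand-made list; the entries below are illustrative, not real HDX data.

stub = [{'name': 'afg', 'title': 'Afghanistan'},
        {'name': 'zmb', 'title': 'Zambia'}]

print(Locations.get_location_from_HDX_code('AFG', locations=stub))        # Afghanistan
print(Locations.get_HDX_code_from_location('Zambia', locations=stub))     # ZMB
print(Locations.get_HDX_code_from_location_partial('Afghan', locations=stub))  # ('AFG', False)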
Validates definition files in a directory.
Args:
path (str): path of the directory containing definition files.
extension (Optional[str]): extension of the filenames to read.
Returns:
bool: True if the directory contains valid definitions.
|
def CheckDirectory(self, path, extension='yaml'):
result = True
if extension:
glob_spec = os.path.join(path, '*.{0:s}'.format(extension))
else:
glob_spec = os.path.join(path, '*')
for definition_file in sorted(glob.glob(glob_spec)):
if not self.CheckFile(definition_file):
result = False
return result
| 814,075 |
Validates the definition in a file.
Args:
path (str): path of the definition file.
Returns:
bool: True if the file contains valid definitions.
|
def CheckFile(self, path):
print('Checking: {0:s}'.format(path))
definitions_registry = registry.DataTypeDefinitionsRegistry()
definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()
result = False
try:
definitions_reader.ReadFile(definitions_registry, path)
result = True
except KeyError as exception:
logging.warning((
'Unable to register data type definition in file: {0:s} with '
'error: {1:s}').format(path, exception))
except errors.FormatError as exception:
logging.warning(
'Unable to validate file: {0:s} with error: {1:s}'.format(
path, exception))
return result
| 814,076 |
Initializes a data type fabric.
Args:
yaml_definition (str): YAML formatted data type definitions.
|
def __init__(self, yaml_definition=None):
definitions_registry = registry.DataTypeDefinitionsRegistry()
if yaml_definition:
definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()
file_object = io.BytesIO(yaml_definition)
definitions_reader.ReadFileObject(definitions_registry, file_object)
super(DataTypeFabric, self).__init__(definitions_registry)
| 814,077 |
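A hedged usage sketch for the fabric above: the YAML is passed as bytes because __init__ wraps it in io.BytesIO, and CreateDataTypeMap is assumed to come from the factory base class, as in the dtfabric project this appears to belong to.

yaml_definition = b"""\
name: int32le
type: integer
attributes:
  byte_order: little-endian
  format: signed
  size: 4
  units: bytes
"""

fabric = DataTypeFabric(yaml_definition=yaml_definition)
data_type_map = fabric.CreateDataTypeMap('int32le')
print(data_type_map.MapByteStream(b'\x2a\x00\x00\x00'))  # 42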
Inlines all CSS in an HTML string
Given an HTML document with CSS declared in the HEAD, inlines it into the
applicable elements. Used primarily in the preparation of styled emails.
Arguments:
html_message -- a string of HTML, including CSS
|
def inline_css(html_message, encoding='unicode'):
document = etree.HTML(html_message)
converter = Conversion()
converter.perform(document, html_message, '', encoding=encoding)
return converter.convertedHTML
| 814,083 |
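Example input for the wrapper above; the exact formatting of the output depends on the underlying Conversion class (this looks like inlinestyler), but the effect is that the <p> gains an inline style attribute.

html = ('<html><head><style>p { color: red; }</style></head>'
        '<body><p>Hello</p></body></html>')
print(inline_css(html))  # ...<p style="color: red">Hello</p>...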
Initializes a data type map context.
Args:
values (dict[str, object]): values per name.
|
def __init__(self, values=None):
super(DataTypeMapContext, self).__init__()
self.byte_size = None
self.state = {}
self.values = values or {}
| 814,084 |
Initializes a data type map size hint.
Args:
byte_size (int): byte size.
is_complete (Optional[bool]): True if the size is the complete size of
the data type.
|
def __init__(self, byte_size, is_complete=False):
super(DataTypeMapSizeHint, self).__init__()
self.byte_size = byte_size
self.is_complete = is_complete
| 814,085 |
Initializes a data type map.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
FormatError: if the data type map cannot be determined from the data
type definition.
|
def __init__(self, data_type_definition):
super(DataTypeMap, self).__init__()
self._data_type_definition = data_type_definition
| 814,086 |
Checks if the byte stream is large enough for the data type.
Args:
byte_stream (bytes): byte stream.
byte_offset (int): offset into the byte stream where to start.
data_type_size (int): data type size.
Raises:
ByteStreamTooSmallError: if the byte stream is too small.
MappingError: if the size of the byte stream cannot be determined.
|
def _CheckByteStreamSize(self, byte_stream, byte_offset, data_type_size):
try:
byte_stream_size = len(byte_stream)
except Exception as exception:
raise errors.MappingError(exception)
if byte_stream_size - byte_offset < data_type_size:
raise errors.ByteStreamTooSmallError(
'Byte stream too small requested: {0:d} available: {1:d}'.format(
data_type_size, byte_stream_size))
| 814,087 |
Initializes a primitive data type map.
Args:
data_type_definition (DataTypeDefinition): data type definition.
|
def __init__(self, data_type_definition):
super(PrimitiveDataTypeMap, self).__init__(data_type_definition)
self._operation = self._GetByteStreamOperation()
| 814,090 |
Folds the data type into a byte stream.
Args:
mapped_value (object): mapped value.
Returns:
bytes: byte stream.
Raises:
FoldingError: if the data type definition cannot be folded into
the byte stream.
|
def FoldByteStream(self, mapped_value, **unused_kwargs):
try:
value = self.FoldValue(mapped_value)
return self._operation.WriteTo(tuple([value]))
except Exception as exception:
error_string = (
'Unable to write: {0:s} to byte stream with error: {1!s}').format(
self._data_type_definition.name, exception)
raise errors.FoldingError(error_string)
| 814,091 |
Maps the data type on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
|
def MapByteStream(
self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
data_type_size = self._data_type_definition.GetByteSize()
self._CheckByteStreamSize(byte_stream, byte_offset, data_type_size)
try:
struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:])
mapped_value = self.MapValue(*struct_tuple)
except Exception as exception:
error_string = (
'Unable to read: {0:s} from byte stream at offset: {1:d} '
'with error: {2!s}').format(
self._data_type_definition.name, byte_offset, exception)
raise errors.MappingError(error_string)
if context:
context.byte_size = data_type_size
return mapped_value
| 814,092 |
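The _operation object used above is not shown here; a minimal stand-in that wraps the struct module is sketched below (the class name and 32-bit format are illustrative).

import struct

class StructOperation(object):
    def __init__(self, format_string):
        self._struct = struct.Struct(format_string)

    def ReadFrom(self, byte_stream):
        # Unpack one value tuple from the start of the byte stream.
        return self._struct.unpack_from(byte_stream)

    def WriteTo(self, values):
        # Pack a value tuple back into bytes.
        return self._struct.pack(*values)

operation = StructOperation('<i')               # 32-bit little-endian signed
print(operation.ReadFrom(b'\x2a\x00\x00\x00'))  # (42,)
print(operation.WriteTo((42,)))                 # b'*\x00\x00\x00'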
Initializes a boolean data type map.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
FormatError: if the data type map cannot be determined from the data
type definition.
|
def __init__(self, data_type_definition):
if (data_type_definition.false_value is None and
data_type_definition.true_value is None):
raise errors.FormatError(
'Boolean data type has no True or False values.')
super(BooleanMap, self).__init__(data_type_definition)
| 814,093 |
Folds the data type into a value.
Args:
value (object): value.
Returns:
object: folded value.
Raises:
ValueError: if the data type definition cannot be folded into the value.
|
def FoldValue(self, value):
if value is False and self._data_type_definition.false_value is not None:
return self._data_type_definition.false_value
if value is True and self._data_type_definition.true_value is not None:
return self._data_type_definition.true_value
raise ValueError('No matching True and False values')
| 814,094 |
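A free-standing sketch of the fold semantics above, with assumed definition values (one byte, 0 for False, 1 for True):

class FakeBooleanDefinition(object):
    false_value = 0
    true_value = 1

definition = FakeBooleanDefinition()

def fold_value(value):
    # Mirrors BooleanMap.FoldValue for the assumed definition.
    if value is False and definition.false_value is not None:
        return definition.false_value
    if value is True and definition.true_value is not None:
        return definition.true_value
    raise ValueError('No matching True and False values')

print(fold_value(True))   # 1
print(fold_value(False))  # 0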