function (string, lengths 11–56k) | repo_name (string, lengths 5–60) | features (sequence)
---|---|---|
def sample_create_context():
    # Create a client
    client = aiplatform_v1.MetadataServiceClient()
    # Initialize request argument(s)
    request = aiplatform_v1.CreateContextRequest(
        parent="parent_value",
    )
    # Make the request
    response = client.create_context(request=request)
    # Handle the response
    print(response) | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def handle(self, *args, **options):
    if options.get('filename'):
        self.ku_openlearning(options.get('filename'), options.get('source_id')) | ocwc/ocwc-data | [
6,
2,
6,
1,
1379956634
] |
def wrap(wrapped_function, *args, **kwargs):
    try:
        return wrapped_function(*args, **kwargs)
    except socket.error:
        err = sys.exc_info()[1]
        raise SocketError(err) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, parse_yaml=True,
             connect_timeout=socket.getdefaulttimeout()):
    if parse_yaml is True:
        try:
            parse_yaml = __import__('yaml').load
        except ImportError:
            logging.error('Failed to load PyYAML, will not parse YAML')
            parse_yaml = False
    self._connect_timeout = connect_timeout
    self._parse_yaml = parse_yaml or (lambda x: x)
    self.host = host
    self.port = port
    self.connect() | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def __exit__(self, exc_type, exc_value, traceback):
    self.close() | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def close(self):
    """Close connection to server."""
    try:
        self._socket.sendall('quit\r\n')
    except socket.error:
        pass
    try:
        self._socket.close()
    except socket.error:
        pass | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def _interact(self, command, expected_ok, expected_err=[]):
    SocketError.wrap(self._socket.sendall, command)
    status, results = self._read_response()
    if status in expected_ok:
        return results
    elif status in expected_err:
        raise CommandFailed(command.split()[0], status, results)
    else:
        raise UnexpectedResponse(command.split()[0], status, results) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def _read_body(self, size):
    body = SocketError.wrap(self._socket_file.read, size)
    SocketError.wrap(self._socket_file.read, 2)  # trailing crlf
    if size > 0 and not body:
        raise SocketError()
    return body | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def _interact_job(self, command, expected_ok, expected_err, reserved=True):
    jid, size = self._interact(command, expected_ok, expected_err)
    body = self._read_body(int(size))
    return Job(self, int(jid), body, reserved) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def _interact_peek(self, command):
    try:
        return self._interact_job(command, ['FOUND'], ['NOT_FOUND'], False)
    except CommandFailed:
        return None | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def put(self, body, priority=DEFAULT_PRIORITY, delay=0, ttr=DEFAULT_TTR):
    """Put a job into the current tube. Returns job id."""
    assert isinstance(body, str), 'Job body must be a str instance'
    jid = self._interact_value('put %d %d %d %d\r\n%s\r\n' % (
                                   priority, delay, ttr, len(body), body),
                               ['INSERTED'],
                               ['JOB_TOO_BIG', 'BURIED', 'DRAINING'])
    return int(jid) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def kick(self, bound=1):
    """Kick at most bound jobs into the ready queue."""
    return int(self._interact_value('kick %d\r\n' % bound, ['KICKED'])) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def peek(self, jid):
    """Peek at a job. Returns a Job, or None."""
    return self._interact_peek('peek %d\r\n' % jid) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def peek_delayed(self):
    """Peek at next delayed job. Returns a Job, or None."""
    return self._interact_peek('peek-delayed\r\n') | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def tubes(self):
    """Return a list of all existing tubes."""
    return self._interact_yaml('list-tubes\r\n', ['OK']) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def use(self, name):
    """Use a given tube."""
    return self._interact_value('use %s\r\n' % name, ['USING']) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def watch(self, name):
    """Watch a given tube."""
    return int(self._interact_value('watch %s\r\n' % name, ['WATCHING'])) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def stats(self):
    """Return a dict of beanstalkd statistics."""
    return self._interact_yaml('stats\r\n', ['OK']) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def pause_tube(self, name, delay):
    """Pause a tube for a given delay time, in seconds."""
    self._interact('pause-tube %s %d\r\n' % (name, delay),
                   ['PAUSED'],
                   ['NOT_FOUND']) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def delete(self, jid):
    """Delete a job, by job id."""
    self._interact('delete %d\r\n' % jid, ['DELETED'], ['NOT_FOUND']) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def bury(self, jid, priority=DEFAULT_PRIORITY):
    """Bury a job, by job id."""
    self._interact('bury %d %d\r\n' % (jid, priority),
                   ['BURIED'],
                   ['NOT_FOUND']) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def stats_job(self, jid):
    """Return a dict of stats about a job, by job id."""
    return self._interact_yaml('stats-job %d\r\n' % jid,
                               ['OK'],
                               ['NOT_FOUND']) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def __init__(self, conn, jid, body, reserved=True):
    self.conn = conn
    self.jid = jid
    self.body = body
    self.reserved = reserved | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def delete(self):
    """Delete this job."""
    self.conn.delete(self.jid)
    self.reserved = False | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def bury(self, priority=None):
    """Bury this job."""
    if self.reserved:
        self.conn.bury(self.jid, priority or self._priority())
        self.reserved = False | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
def touch(self):
    """Touch this reserved job, requesting more time to work on it before
    it expires."""
    if self.reserved:
        self.conn.touch(self.jid) | earl/beanstalkc | [
455,
114,
455,
25,
1227206321
] |
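The earl/beanstalkc rows above cover most of that library's Connection and Job classes. As a rough orientation, a minimal usage sketch is shown below; it assumes a running beanstalkd server and the library's reserve() method, which is not included in these rows.

```python
# Hedged usage sketch for the beanstalkc API shown above; assumes a
# beanstalkd server on localhost:11300 and Connection.reserve(), which
# is part of the library but not included in the rows above.
import beanstalkc

conn = beanstalkc.Connection(host='localhost', port=11300)
conn.use('emails')                     # produce into the 'emails' tube
jid = conn.put('{"to": "a@example.com"}', priority=100, delay=0, ttr=60)

conn.watch('emails')                   # consume from the same tube
job = conn.reserve(timeout=5)          # Job instance, or None on timeout
if job is not None:
    print(job.body)
    job.delete()                       # acknowledge and remove the job
```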
def commit_signal(data_id):
    """Nudge manager at the end of every Data object save event."""
    if not getattr(settings, "FLOW_MANAGER_DISABLE_AUTO_CALLS", False):
        immediate = getattr(settings, "FLOW_MANAGER_SYNC_AUTO_CALLS", False)
        async_to_sync(manager.communicate)(data_id=data_id, run_sync=immediate) | genialis/resolwe | [
34,
27,
34,
7,
1428595640
] |
def manager_post_save_handler(sender, instance, created, **kwargs):
    """Run newly created (spawned) processes."""
    if (
        instance.status == Data.STATUS_DONE
        or instance.status == Data.STATUS_ERROR
        or created
    ):
        # Run manager at the end of the potential transaction. Otherwise
        # tasks are sent to workers before the transaction ends and therefore
        # workers cannot access objects created inside the transaction.
        transaction.on_commit(lambda: commit_signal(instance.id)) | genialis/resolwe | [
34,
27,
34,
7,
1428595640
] |
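The handler above defers the manager call with transaction.on_commit so that spawned objects are only announced once they are actually visible in the database. A minimal sketch of the same pattern in a generic Django app is shown below; notify_workers is an illustrative stand-in, not resolwe code.

```python
# Sketch of the deferred-dispatch pattern used above; notify_workers is
# an illustrative stand-in for manager.communicate(), not a resolwe API.
from django.db import transaction
from django.db.models.signals import post_save
from django.dispatch import receiver

def notify_workers(pk):
    print("dispatching object %s after the transaction commits" % pk)

@receiver(post_save)
def deferred_post_save_handler(sender, instance, created, **kwargs):
    # Defer until the surrounding transaction commits, so workers never
    # receive a primary key that is not yet visible in the database.
    transaction.on_commit(lambda: notify_workers(instance.pk))
```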
def __init__(self,
             config,
             logger,
             scope=constants.COMPUTE_SCOPE,
             discovery=constants.CLOUDRESOURCES_DISCOVERY,
             api_version=constants.API_V1):
    super(CloudResourcesBase, self).__init__(
        config,
        logger,
        scope,
        discovery,
        api_version) | cloudify-cosmo/cloudify-gcp-plugin | [
6,
13,
6,
6,
1428599218
] |
def get(self):
    raise NotImplementedError() | cloudify-cosmo/cloudify-gcp-plugin | [
6,
13,
6,
6,
1428599218
] |
def scale_gradient(
        t: tf.Tensor, scale: types.FloatLike | deepmind/sonnet | [
9523,
1351,
9523,
33,
1491219275
] |
def grad(dy: tf.Tensor) -> Tuple[tf.Tensor, None]:
    """Scaled gradient."""
    return scale * dy, None | deepmind/sonnet | [
9523,
1351,
9523,
33,
1491219275
] |
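The two deepmind/sonnet rows above are fragments of a gradient-scaling utility, and the scale_gradient cell is cut off mid-signature. For context, a self-contained sketch of the same technique using tf.custom_gradient is shown below; it illustrates the idea and is not claimed to be the Sonnet source.

```python
# Gradient scaling with tf.custom_gradient; an illustration of the
# technique, not necessarily the deepmind/sonnet implementation.
from typing import Tuple
import tensorflow as tf

@tf.custom_gradient
def scale_gradient(t: tf.Tensor, scale: float):
    def grad(dy: tf.Tensor) -> Tuple[tf.Tensor, None]:
        # Identity on the forward pass; the backward pass multiplies the
        # incoming gradient by `scale` and returns no gradient for `scale`.
        return scale * dy, None
    return t, grad

x = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = scale_gradient(x, 0.5) ** 2
print(tape.gradient(y, x))  # 2 * x * 0.5 = 3.0
```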
def test_unknown_tokens_are_tokenised():
    assert_tokens("~", is_token("unknown", "~")) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
def test_whitespace_is_tokenised():
    assert_tokens(" \t\t ", is_token("whitespace", " \t\t ")) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
def test_escape_sequences_in_identifiers_are_tokenised():
    assert_tokens(r"\:", is_token("identifier", r"\:")) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
def test_strings_are_tokenised():
    assert_tokens("'Tristan'", is_token("string", "'Tristan'")) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
def test_unterminated_strings_are_tokenised():
    assert_tokens("'Tristan", is_token("unterminated string", "'Tristan")) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
def test_dots_are_tokenised():
    assert_tokens(".", is_token("symbol", ".")) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
def test_greater_thans_are_tokenised():
    assert_tokens(">>", is_token("symbol", ">"), is_token("symbol", ">")) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
def test_open_parens_are_tokenised():
    assert_tokens("((", is_token("symbol", "("), is_token("symbol", "(")) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
def test_open_square_brackets_are_tokenised():
    assert_tokens("[[", is_token("symbol", "["), is_token("symbol", "[")) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
def test_choices_are_tokenised():
    assert_tokens("||", is_token("symbol", "|"), is_token("symbol", "|")) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
def test_can_tokenise_multiple_tokens():
    assert_tokens("The Magic Position",
                  is_token("identifier", "The"),
                  is_token("whitespace", " "),
                  is_token("identifier", "Magic"),
                  is_token("whitespace", " "),
                  is_token("identifier", "Position"),
                  ) | mwilliamson/python-mammoth | [
617,
109,
617,
20,
1385907211
] |
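The python-mammoth tests above rely on two helpers, assert_tokens and is_token, that are not part of these rows. A hypothetical sketch of what such helpers could look like is shown below, assuming a tokenise() function that yields (token_type, value) pairs; this is illustrative only and not the actual mammoth test utilities.

```python
# Hypothetical helpers for the tests above; tokenise() is assumed to
# return (token_type, value) tuples. Not the python-mammoth source.
def is_token(token_type, value):
    return (token_type, value)

def assert_tokens(source, *expected):
    actual = list(tokenise(source))
    assert actual == list(expected), "%r != %r" % (actual, expected)
```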
def __init__(self, prior):
    self.prior = prior | mwhoffman/reggie | [
6,
6,
6,
2,
1425653663
] |
def test_bounds(self):
    bshape = np.shape(self.prior.bounds)
    assert bshape == (2,) or bshape == (self.prior.ndim, 2) | mwhoffman/reggie | [
6,
6,
6,
2,
1425653663
] |
def test_logprior(self):
    for theta in self.prior.sample(5, 0):
        g1 = spop.approx_fprime(theta, self.prior.get_logprior, 1e-8)
        _, g2 = self.prior.get_logprior(theta, True)
        nt.assert_allclose(g1, g2, rtol=1e-6) | mwhoffman/reggie | [
6,
6,
6,
2,
1425653663
] |
def __init__(self):
    PriorTest.__init__(self, priors.Uniform([0, 0], [1, 1])) | mwhoffman/reggie | [
6,
6,
6,
2,
1425653663
] |
def __init__(self):
    PriorTest.__init__(self, priors.Normal([0, 0], [1, 1])) | mwhoffman/reggie | [
6,
6,
6,
2,
1425653663
] |
def __init__(self):
    PriorTest.__init__(self, priors.LogNormal([0, 0], [1, 1])) | mwhoffman/reggie | [
6,
6,
6,
2,
1425653663
] |
def indent(cls, element, indent=" ", level=0):
    """Set whitespace for indentation. | srguiwiz/nrvr-commander | [
16,
5,
16,
9,
1371660119
] |
def unindent(cls, element):
    """Remove whitespace from indentation. | srguiwiz/nrvr-commander | [
16,
5,
16,
9,
1371660119
] |
def tostring(cls, element, indent=" ", xml_declaration=True, encoding="utf-8"):
    """Generate a string representation. | srguiwiz/nrvr-commander | [
16,
5,
16,
9,
1371660119
] |
def simpledict(cls, element):
    """Generate a dictionary from child element tags and text. | srguiwiz/nrvr-commander | [
16,
5,
16,
9,
1371660119
] |
def testConstructor(self):
    # Expected values
    expctdName = 'Cn'
    expctdDescription = "Channel concatenation" | shodimaggio/SaivDr | [
7,
8,
7,
6,
1447318698
] |
def testPredict(self,
                nchs,nrows,ncols,datatype):
    rtol,atol=1e-5,1e-8
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Parameters
    nSamples = 8
    nChsTotal = sum(nchs)
    # nSamples x nRows x nCols x (nChsTotal-1)
    Xac = torch.randn(nSamples,nrows,ncols,nChsTotal-1,dtype=datatype,device=device,requires_grad=True)
    # nSamples x nRows x nCols
    Xdc = torch.randn(nSamples,nrows,ncols,dtype=datatype,device=device,requires_grad=True)
    # Expected values
    # nSamples x nRows x nCols x nChsTotal
    expctdZ = torch.cat((Xdc.unsqueeze(dim=3),Xac),dim=3)
    # Instantiation of target class
    layer = NsoltChannelConcatenation2dLayer(
        name='Cn'
    )
    # Actual values
    with torch.no_grad():
        actualZ = layer.forward(Xac=Xac,Xdc=Xdc)
    # Evaluation
    self.assertEqual(actualZ.dtype,datatype)
    self.assertTrue(torch.allclose(actualZ,expctdZ,rtol=rtol,atol=atol))
    self.assertFalse(actualZ.requires_grad) | shodimaggio/SaivDr | [
7,
8,
7,
6,
1447318698
] |
def testPredictUnsqueezedXdc(self,
                             nchs,nrows,ncols,datatype):
    rtol,atol=1e-5,1e-8
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Parameters
    nSamples = 8
    nChsTotal = sum(nchs)
    # nSamples x nRows x nCols x (nChsTotal-1)
    Xac = torch.randn(nSamples,nrows,ncols,nChsTotal-1,dtype=datatype,device=device,requires_grad=True)
    # nSamples x nRows x nCols x 1
    Xdc = torch.randn(nSamples,nrows,ncols,1,dtype=datatype,device=device,requires_grad=True)
    # Expected values
    # nSamples x nRows x nCols x nChsTotal
    expctdZ = torch.cat((Xdc,Xac),dim=3)
    # Instantiation of target class
    layer = NsoltChannelConcatenation2dLayer(
        name='Cn'
    )
    # Actual values
    with torch.no_grad():
        actualZ = layer.forward(Xac=Xac,Xdc=Xdc)
    # Evaluation
    self.assertEqual(actualZ.dtype,datatype)
    self.assertTrue(torch.allclose(actualZ,expctdZ,rtol=rtol,atol=atol))
    self.assertFalse(actualZ.requires_grad) | shodimaggio/SaivDr | [
7,
8,
7,
6,
1447318698
] |
def testBackward(self,
                 nchs,nrows,ncols,datatype):
    rtol,atol=1e-5,1e-8
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") | shodimaggio/SaivDr | [
7,
8,
7,
6,
1447318698
] |
def testBackwardUnsqueezedXdc(self,
                              nchs,nrows,ncols,datatype):
    rtol,atol=1e-5,1e-8
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") | shodimaggio/SaivDr | [
7,
8,
7,
6,
1447318698
] |
def resilient_backpropagation(network, trainingset, testset, cost_function, ERROR_LIMIT=1e-3, max_iterations = (), weight_step_max = 50., weight_step_min = 0., start_step = 0.5, learn_max = 1.2, learn_min = 0.5, print_rate = 1000, save_trained_network = False ):
    # Implemented according to iRprop+
    # http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf | jorgenkg/python-neural-network | [
292,
99,
292,
4,
1420805959
] |
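The jorgenkg/python-neural-network cell above is cut off after the signature and the iRprop+ citation. As a reminder of what the cited algorithm does, a per-weight update sketch is shown below; it illustrates iRprop+ under the usual formulation and is not this repository's code.

```python
# Sketch of one iRprop+ update step (Igel & Huesken); illustrative only.
import numpy as np

def irprop_plus_step(weights, grad, prev_grad, step, prev_dw,
                     error, prev_error,
                     step_max=50.0, step_min=0.0,
                     eta_plus=1.2, eta_minus=0.5):
    """All arrays share the shape of `weights` and are updated in place."""
    sign = grad * prev_grad
    dw = np.zeros_like(weights)

    same = sign > 0            # gradient kept its sign: grow the step
    step[same] = np.minimum(step[same] * eta_plus, step_max)
    dw[same] = -np.sign(grad[same]) * step[same]

    flipped = sign < 0         # sign change: shrink the step
    step[flipped] = np.maximum(step[flipped] * eta_minus, step_min)
    if error > prev_error:     # the "+" in iRprop+: revert the last move
        dw[flipped] = -prev_dw[flipped]
    grad[flipped] = 0.0        # forces the neutral branch next iteration

    neutral = sign == 0        # first iteration or just after a reset
    dw[neutral] = -np.sign(grad[neutral]) * step[neutral]

    weights += dw
    return weights, grad, step, dw
```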
def test_marshaller_api_versions():
    assert ('1.0', ) == \
        hdf5storage.plugins.supported_marshaller_api_versions() | frejanordsiek/hdf5storage | [
73,
20,
73,
16,
1387523795
] |
def __init__(self, handle=0):
    super(GXDBWRITE, self).__init__(GXContext._get_tls_geo(), handle) | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def null(cls):
    """
    A null (undefined) instance of `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def is_null(self):
    """
    Check if this is a null (undefined) instance | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def create(cls, db):
    """
    Create a `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object
    Add channels using the `add_channel <geosoft.gxapi.GXDBWRITE.add_channel>` method.channel. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def create_xy(cls, db):
    """
    Create a `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object for a XY-located data. Add channels using the
    `add_channel <geosoft.gxapi.GXDBWRITE.add_channel>` method. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def create_xyz(cls, db):
    """
    Create a `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object for a XYZ-located data.
    Add channels using the `add_channel <geosoft.gxapi.GXDBWRITE.add_channel>` method.channel | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def add_channel(self, chan):
    """
    Add a data channel to the `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def get_db(self):
    """
    Get the output `GXDB <geosoft.gxapi.GXDB>` handle from the `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def get_vv(self, chan):
    """
    Get the `GXVV <geosoft.gxapi.GXVV>` handle for a channel. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def get_va(self, chan):
    """
    Get the `GXVA <geosoft.gxapi.GXVA>` handle for an array channel. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def get_v_vx(self):
    """
    Get the X channel `GXVV <geosoft.gxapi.GXVV>` handle. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def get_v_vy(self):
    """
    Get the Y channel `GXVV <geosoft.gxapi.GXVV>` handle. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def get_v_vz(self):
    """
    Get the Z channel `GXVV <geosoft.gxapi.GXVV>` handle. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def get_chan_array_size(self, chan):
    """
    Get the number of columns of data in a channel. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def add_block(self, line):
    """
    Add the current block of data. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def commit(self):
    """
    Commit remaining data to the database. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def test_func(self, ra):
    """
    Temporary test function. | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def setUp(self):
    self.setUp1() | js850/nested_sampling | [
25,
19,
25,
6,
1373470342
] |
def test1(self):
    print "running TestNS"
    self.assert_(len(self.ns.replicas) == self.nreplicas)
    self.assert_(self.Emax < self.Emax0)
    self.assert_(self.Emin < self.Emax)
    self.assert_(self.Emin >= 0)
    self.assert_(self.ns.stepsize != self.stepsize)
    self.assertEqual(len(self.ns.max_energies), self.niter * self.nproc) | js850/nested_sampling | [
25,
19,
25,
6,
1373470342
] |
def setUp(self):
    self.setUp1(nproc=3) | js850/nested_sampling | [
25,
19,
25,
6,
1373470342
] |
def setUp(self):
    self.setUp1(nproc=3,multiproc=False) | js850/nested_sampling | [
25,
19,
25,
6,
1373470342
] |
def create_matrices(self, extra_g=None, extra_h=None):
    """ Initialize the augmented stoichiometric matrix.
    extra_g: (n x nr) array
        Extra entries in the constraint matrix. Positive values for lower
        bounds, negative values for upper bounds
    extra_h: (n) array
        Corresponding bounds for the extra entries matrix
    """
    # Create stoichiometric matrix, get key dimensions
    N = cobra.util.create_stoichiometric_matrix(self.model)
    nm, nr = N.shape
    self.nm = nm
    self.nr = nr
    # Construct full G and h matrices, then drop homogeneous (or near
    # homogeneous) entries
    g_full = np.vstack([np.eye(nr), -np.eye(nr)])
    h_full = np.array([(r.lower_bound, -r.upper_bound)
                       for r in self.model.reactions]).T.flatten()
    inhomogeneous = ~((h_full <= -1000) | np.isclose(h_full, 0))
    h_full = h_full[inhomogeneous]
    g_full = g_full[inhomogeneous]
    if extra_g is not None:
        assert extra_g.shape[1] == nr
        assert extra_g.shape[0] == len(extra_h)
        g_full = np.vstack([g_full, extra_g])
        h_full = np.hstack([h_full, extra_h])
    G = g_full
    h = h_full
    self.nt = nt = len(h)
    self.D = np.vstack([
        np.hstack([N, np.zeros((nm, nt)), np.zeros((nm, 1))]),
        np.hstack([G, -np.eye(nt), np.atleast_2d(-h).T])
    ]) | pstjohn/pyefm | [
1,
2,
1,
1,
1484250450
] |
def read_double_out(self, out_file):
    with open(out_file, 'rb') as f:
        out_arr = np.fromstring(f.read()[13:], dtype='>d').reshape(
            (-1, self.nt + self.nr + 1)).T
    out_arr = np.asarray(out_arr, dtype=np.float64).T
    # Sort by the absolute value of the stoichiometry
    sort_inds = np.abs(out_arr[:, :self.nr]).sum(1).argsort()
    out_arr = out_arr[sort_inds]
    unbounded = out_arr[np.isclose(out_arr[:,-1], 0.)]
    bounded = out_arr[~np.isclose(out_arr[:,-1], 0.)]
    if bounded.size:  # Test if its empty
        bounded /= np.atleast_2d(bounded[:,-1]).T
    unbounded_df = pd.DataFrame(
        unbounded[:, :self.nr],
        columns=[r.id for r in self.model.reactions],
        index=['UEV{}'.format(i)
               for i in range(1, unbounded.shape[0] + 1)])
    bounded_df = pd.DataFrame(
        bounded[:, :self.nr],
        columns=[r.id for r in self.model.reactions],
        index=('BEV{}'.format(i)
               for i in range(1, bounded.shape[0] + 1)))
    return unbounded_df.append(bounded_df) | pstjohn/pyefm | [
1,
2,
1,
1,
1484250450
] |
def calculate_elementary_vectors(cobra_model, opts=None, verbose=True,
                                 java_args=None, extra_g=None, extra_h=None):
    """Calculate elementary flux vectors, which capture arbitrary linear
    constraints. Approach as detailed in S. Klamt et al., PLoS Comput Biol. 13,
    e1005409–22 (2017).
    Augmented constraints as a hacky workaround for implementing more
    complicated constraints without using optlang.
    java_args: string
        Extra command-line options to pass to the java virtual machine.
        Eg. '-Xmx1g' will set the heap space to 1 GB. | pstjohn/pyefm | [
1,
2,
1,
1,
1484250450
] |
def get_support_minimal(efvs):
    """Return only those elementary flux vectors whose support is not a proper
    superset of another EFV""" | pstjohn/pyefm | [
1,
2,
1,
1,
1484250450
] |
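The get_support_minimal docstring above describes filtering elementary flux vectors by support minimality, but the body is not included in the row. One way such a filter could be written is sketched below, assuming efvs is a pandas DataFrame of flux values; this is an illustration, not necessarily the pyefm implementation.

```python
# Support-minimality filter sketch; assumes `efvs` is a DataFrame whose
# rows are flux vectors. Not necessarily the pyefm implementation.
import numpy as np
import pandas as pd

def support_minimal(efvs: pd.DataFrame, tol: float = 1e-9) -> pd.DataFrame:
    """Drop EFVs whose support is a proper superset of another EFV's support."""
    supports = [frozenset(np.flatnonzero(np.abs(row) > tol))
                for row in efvs.values]
    keep = []
    for i, s_i in enumerate(supports):
        # Keep row i only if no other row's support is a proper subset of it.
        if not any(s_j < s_i for j, s_j in enumerate(supports) if j != i):
            keep.append(i)
    return efvs.iloc[keep]
```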
def save(self, update_site=False, *args, **kwargs):
    """
    Set the site to the current site when the record is first
    created, or the ``update_site`` argument is explicitly set
    to ``True``.
    """
    if update_site or not self.id:
        self.site_id = current_site_id()
    super(SiteRelated, self).save(*args, **kwargs) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def __str__(self):
    return self.title | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def generate_unique_slug(self):
    """
    Create a unique slug by passing the result of get_slug() to
    utils.urls.unique_slug, which appends an index if necessary.
    """
    # For custom content types, use the ``Page`` instance for
    # slug lookup.
    concrete_model = base_concrete_model(Slugged, self)
    slug_qs = concrete_model.objects.exclude(id=self.id)
    return unique_slug(slug_qs, "slug", self.get_slug()) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def admin_link(self):
    return "<a href='%s'>%s</a>" % (self.get_absolute_url(),
                                    ugettext("View on site")) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def save(self, *args, **kwargs):
    """
    Set the description field on save.
    """
    if self.gen_description:
        self.description = strip_tags(self.description_from_content())
    super(MetaData, self).save(*args, **kwargs) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def description_from_content(self):
    """
    Returns the first block or sentence of the first content-like
    field.
    """
    description = ""
    # Use the first RichTextField, or TextField if none found.
    for field_type in (RichTextField, models.TextField):
        if not description:
            for field in self._meta.fields:
                if isinstance(field, field_type) and \
                        field.name != "description":
                    description = getattr(self, field.name)
                    if description:
                        from mezzanine.core.templatetags.mezzanine_tags \
                            import richtext_filters
                        description = richtext_filters(description)
                        break
    # Fall back to the title if description couldn't be determined.
    if not description:
        description = str(self)
    # Strip everything after the first block or sentence.
    ends = ("</p>", "<br />", "<br/>", "<br>", "</ul>",
            "\n", ". ", "! ", "? ")
    for end in ends:
        pos = description.lower().find(end)
        if pos > -1:
            description = TagCloser(description[:pos]).html
            break
    else:
        description = truncatewords_html(description, 100)
    return description | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def save(self, *args, **kwargs):
    _now = now()
    self.updated = _now
    if not self.id:
        self.created = _now
    super(TimeStamped, self).save(*args, **kwargs) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def save(self, *args, **kwargs):
    """
    Set default for ``publish_date``. We can't use ``auto_now_add`` on
    the field as it will be blank when a blog post is created from
    the quick blog form in the admin dashboard.
    """
    if self.publish_date is None:
        self.publish_date = now()
    super(Displayable, self).save(*args, **kwargs) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def publish_date_since(self):
    """
    Returns the time since ``publish_date``.
    """
    return timesince(self.publish_date) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def get_absolute_url(self):
    """
    Raise an error if called on a subclass without
    ``get_absolute_url`` defined, to ensure all search results
    contain a URL.
    """
    name = self.__class__.__name__
    raise NotImplementedError("The model %s does not have "
                              "get_absolute_url defined" % name) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def _get_next_or_previous_by_publish_date(self, is_next, **kwargs):
    """
    Retrieves next or previous object by publish date. We implement
    our own version instead of Django's so we can hook into the
    published manager and concrete subclasses.
    """
    arg = "publish_date__gt" if is_next else "publish_date__lt"
    order = "publish_date" if is_next else "-publish_date"
    lookup = {arg: self.publish_date}
    concrete_model = base_concrete_model(Displayable, self)
    try:
        queryset = concrete_model.objects.published
    except AttributeError:
        queryset = concrete_model.objects.all
    try:
        return queryset(**kwargs).filter(**lookup).order_by(order)[0]
    except IndexError:
        pass | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def get_previous_by_publish_date(self, **kwargs):
    """
    Retrieves previous object by publish date.
    """
    return self._get_next_or_previous_by_publish_date(False, **kwargs) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def __new__(cls, name, bases, attrs):
    if "Meta" not in attrs:
        class Meta:
            pass
        attrs["Meta"] = Meta
    if hasattr(attrs["Meta"], "order_with_respect_to"):
        order_field = attrs["Meta"].order_with_respect_to
        attrs["order_with_respect_to"] = order_field
        del attrs["Meta"].order_with_respect_to
    if not hasattr(attrs["Meta"], "ordering"):
        setattr(attrs["Meta"], "ordering", ("_order",))
    return super(OrderableBase, cls).__new__(cls, name, bases, attrs) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def with_respect_to(self):
    """
    Returns a dict to use as a filter for ordering operations
    containing the original ``Meta.order_with_respect_to`` value
    if provided. If the field is a Generic Relation, the dict
    returned contains names and values for looking up the
    relation's ``ct_field`` and ``fk_field`` attributes.
    """
    try:
        name = self.order_with_respect_to
        value = getattr(self, name)
    except AttributeError:
        # No ``order_with_respect_to`` specified on the model.
        return {}
    # Support for generic relations.
    field = getattr(self.__class__, name)
    if isinstance(field, GenericForeignKey):
        names = (field.ct_field, field.fk_field)
        return dict([(n, getattr(self, n)) for n in names])
    return {name: value} | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |
def delete(self, *args, **kwargs):
    """
    Update the ordering values for siblings.
    """
    lookup = self.with_respect_to()
    lookup["_order__gte"] = self._order
    concrete_model = base_concrete_model(Orderable, self)
    after = concrete_model.objects.filter(**lookup)
    after.update(_order=models.F("_order") - 1)
    super(Orderable, self).delete(*args, **kwargs) | cccs-web/mezzanine | [
2,
1,
2,
1,
1404268714
] |