class SimplePlugin(object): """Plugin base class which auto-subscribes methods for known channels.""" bus = None """A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine.""" def __init__(self, bus): self.bus = bus def subscribe(self): """Register this object as a (multi-channel) listener on the bus.""" for channel in self.bus.listeners: method = getattr(self, channel, None) if method is not None: self.bus.subscribe(channel, method) def unsubscribe(self): """Unregister this object as a listener on the bus.""" class SignalHandler(object): """Register bus channels (and listeners) for system signals. You can modify what signals your application listens for, and what it does when it receives signals, by modifying :attr:`SignalHandler.handlers`, a dict of {signal name: callback} pairs. The default set is:: handlers = {'SIGTERM': self.bus.exit, 'SIGHUP': self.handle_SIGHUP, 'SIGUSR1': self.bus.graceful, } The :func:`SignalHandler.handle_SIGHUP` method calls :func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>` if the process is daemonized, but :func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>` if the process is attached to a TTY. This is because Unix window managers tend to send SIGHUP to terminal windows when the user closes them. Feel free to add signals which are not available on every platform. The :class:`SignalHandler` will ignore errors raised from attempting to register handlers for unknown signals. """ handlers = {} """A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit).""" signals = {} """A map from signal numbers to names.""" for k, v in vars(_signal).items(): if k.startswith('SIG') and not k.startswith('SIG_'): signals[v] = k del k, v # Set default handlers self.handlers['SIGINT'] = self._jython_SIGINT_handler self._previous_handlers = {} def _jython_SIGINT_handler(self, signum=None, frame=None): # See self.bus.log('Keyboard Interrupt: shutting down bus') self.bus.exit() """Subscribe self.handlers to signals.""" for sig, func in self.handlers.items(): self.set_handler(sig, func) except ValueError: pass """Unsubscribe self.handlers from signals.""" for signum, handler in self._previous_handlers.items(): signame = self.signals[signum] if handler is None: self.bus.log("Restoring %s handler to SIG_DFL." % signame) handler = _signal.SIG_DFL else: self.bus.log("Restoring %s handler %r." % (signame, handler)) try: our_handler = _signal.signal(signum, handler) if our_handler is None: self.bus.log("Unable to restore %s handler %r." % (signame, handler), level=40, traceback=True) def set_handler(self, signal, listener=None): """Subscribe a handler for the given signal (number or name). If the optional 'listener' argument is provided, it will be subscribed as a listener for the given signal's channel. If the given signal name or number is not available on the current platform, ValueError is raised. """ except KeyError: raise ValueError("No such signal: %r" % signal) signum = signal prev = _signal.signal(signum, self._handle_signal) self._previous_handlers[signum] = prev if listener is not None: self.bus.log("Listening for %s." % signame) self.bus.subscribe(signame, listener) def _handle_signal(self, signum=None, frame=None): """Python signal handler (self.set_handler subscribes it for you).""" signame = self.signals[signum] self.bus.log("Caught signal %s." 
% signame) self.bus.publish(signame) def handle_SIGHUP(self): """Restart if daemonized, else exit.""" if os.isatty(sys.stdin.fileno()): class DropPrivileges(SimplePlugin): """Drop privileges. uid/gid arguments not available on Windows. Special thanks to Gavin Baker:. def __init__(self, bus, umask=None, uid=None, gid=None): SimplePlugin.__init__(self, bus) self.finalized = False self.uid = uid self.gid = gid self.umask = umask def _get_uid(self): return self._uid def _set_uid(self, val): self._uid = val uid = property(_get_uid, _set_uid, doc="The uid under which to run. Availability: Unix.") def _get_gid(self): return self._gid def _set_gid(self, val): self._gid = val gid = property(_get_gid, _set_gid, doc="The gid under which to run. Availability: Unix.") def _get_umask(self): return self._umask def _set_umask(self, val): self._umask = val umask = property(_get_umask, _set_umask, doc="""The default permission mode for newly created files and directories. Usually expressed in octal format, for example, ``0644``. Availability: Unix, Windows. """) def start(self): # uid/gid def current_ids(): if grp: group = grp.getgrgid(os.getgid())[0] return name, group if self.finalized: if not (self.uid is None and self.gid is None): self.bus.log('Already running as uid: %r gid: %r' % if self.uid is not None: os.setuid(self.uid) self.bus.log('Running as uid: %r gid: %r' % current_ids()) # umask if self.umask is not None: old_umask = os.umask(self.umask) self.bus.log('umask old: %03o, new: %03o' % (old_umask, self.umask)) self.finalized = True # This is slightly higher than the priority for server.start # in order to facilitate the most common use: starting on a low class Daemonizer(SimplePlugin): """Daemonize the running script. Use this with a Web Site Process Bus via:: Daemonizer(bus).subscribe() When this component finishes, the process is completely decoupled from the parent environment. Please note that when this component is used, the return code from the parent process will still be 0 if a startup of whether the process fully started. In fact, that return code only indicates if the process succesfully finished the first fork. def __init__(self, bus, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'): self.stdout = stdout self.stderr = stderr self.bus.log('Already deamonized.') # forking has issues with threads: # # "The general problem with making fork() work in a multi-threaded self.bus.log('There are %r active threads. ' 'Daemonizing now may cause strange failures.' % threading.enumerate(), level=30) # See # (or section 1.7) # and # Finish up with the current stdout/stderr sys.stdout.flush() sys.stderr.flush() # Do first fork. try: pid = os.fork() exc = sys.exc_info()[1] sys.exit("%s: fork #1 failed: (%d) %s\n" % (sys.argv[0], exc.errno, exc.strerror)) os.setsid() # Do second fork sys.exit("%s: fork #2 failed: (%d) %s\n" os.chdir("/") os.umask(0) si = open(self.stdin, "r") so = open(self.stdout, "a+") se = open(self.stderr, "a+") os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) self.bus.log('Daemonized to PID: %s' % os.getpid()) start.priority = 65 class PIDFile(SimplePlugin): """Maintain a PID file via a WSPBus.""" def __init__(self, bus, pidfile): self.pidfile = pidfile pid = os.getpid() self.bus.log('PID %r written to %r.' 
% (pid, self.pidfile)) self.finalized = True start.priority = 70 def exit(self): os.remove(self.pidfile) class PerpetualTimer(threading._Timer): """A responsive subclass of threading._Timer whose run() method repeats. Use this timer only when you really need a very interruptible timer; this checks its 'finished' condition up to 20 times a second, which can - results in pretty high CPU usage + results in pretty high CPU usage def run(self): while True: self.finished.wait(self.interval) class BackgroundTask(threading.Thread): "". def __init__(self, interval, function, args=[], kwargs={}, bus=None): threading.Thread.__init__(self) self.interval = interval self.kwargs = kwargs self.running = False def cancel(self): self.running = True while self.running: % self.function, level=40, traceback=True) # Quit on first error to avoid massive logs. raise def _set_daemon(self): return True class Monitor(SimplePlugin): """WSPBus listener to periodically run a callback in its own thread.""" callback = None """The function to call at intervals.""" frequency = 60 """The time in seconds between callback runs.""" thread = None """A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>` thread.""" def __init__(self, bus, callback, frequency=60, name=None): self.callback = callback self.frequency = frequency self.thread = None self.name = name """Start our callback in its own background thread.""" if self.frequency > 0: self.bus.log("Monitor thread %r already started." % threadname) def stop(self): """Stop our callback's background task thread.""" if self.thread is None: self.thread.join() self.bus.log("Stopped thread %r." % name) self.thread = None def graceful(self): """Stop the callback's background task thread and restart it.""" self.stop() class Autoreloader(Monitor): """Monitor which re-executes the process when files change. This :ref:`plugin<plugins>` restarts the process (via :func: :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins, the autoreload plugin takes a ``frequency`` argument. The default is 1 second; that is, the autoreloader will examine files once each second. files = None """The set of files to poll for modifications.""" frequency = 1 """The interval in seconds at which to poll for modified files.""" match = '.*' """A regular expression by which to match filenames.""" def __init__(self, bus, frequency=1, match='.*'): self.mtimes = {} self.files = set() self.match = match Monitor.__init__(self, bus, self.run, frequency) """Start our own background task thread for self.run.""" self.mtimes = {} Monitor.start(self) - start.priority = 70 + start.priority = 70 def sysfiles(self): """Return a Set of sys.modules filenames to monitor.""" files = set() f = os.path.normpath(os.path.join(_module__file__base, f)) files.add(f) return files """Reload the process if registered files have been modified.""" for filename in self.sysfiles() | self.files: if filename: if filename.endswith('.pyc'): filename = filename[:-1] - oldtime = self.mtimes.get(filename, 0) if oldtime is None: # Module with no .py file. Skip it. continue try: mtime = os.stat(filename).st_mtime except OSError: # Either a module with no .py file, or it's been deleted. mtime = None if filename not in self.mtimes: # If a module has no .py file, this will be None. self.mtimes[filename] = mtime class ThreadManager(SimplePlugin): """Manager for HTTP request threads. If you have control over thread creation and destruction, publish to the 'acquire_thread' and 'release_thread' channels (for each thread). 
This will register/unregister the current thread and publish to 'start_thread' and 'stop_thread' listeners in the bus as needed. If threads are created and destroyed by code you do not control (e.g., Apache), then, at the beginning of every HTTP request, publish to 'acquire_thread' only. You should not publish to 'release_thread' in this case, since you cannot know whether the thread will be re-used or not. The bus will call 'stop_thread' listeners for you when it stops. threads = None """A map of {thread ident: index number} pairs.""" self.threads = {} def acquire_thread(self): """Run 'start_thread' listeners for the current thread. If the current thread has already been seen, any 'start_thread' listeners will not be run again. i = len(self.threads) + 1 self.threads[thread_ident] = i self.bus.publish('start_thread', i) def release_thread(self): """Release the current thread and run 'stop_thread' listeners.""" thread_ident = get_thread_ident() i = self.threads.pop(thread_ident, None) if i is not None: self.bus.publish('stop_thread', i) """Release all threads and run all 'stop_thread' listeners.""" for thread_ident, i in self.threads.items():
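The listing above is an incomplete extraction of cherrypy/process/plugins.py (the cherrypy-3.2.x branch linked below). For orientation, here is a minimal sketch of how these plugins are normally wired to cherrypy.engine, the default Bus. It assumes CherryPy 3.x; the custom plugin, the PID-file path and the application class are illustrative, not part of the source above.

import cherrypy
from cherrypy.process.plugins import Daemonizer, PIDFile, SimplePlugin

class DatabasePlugin(SimplePlugin):
    # SimplePlugin auto-subscribes methods named after bus channels,
    # so start() and stop() run when the engine starts and stops.
    def start(self):
        self.bus.log('Opening database connections')
    def stop(self):
        self.bus.log('Closing database connections')

class Root(object):
    @cherrypy.expose
    def index(self):
        return 'Hello'

Daemonizer(cherrypy.engine).subscribe()                      # fork into the background
PIDFile(cherrypy.engine, '/var/run/myapp.pid').subscribe()   # path is illustrative
DatabasePlugin(cherrypy.engine).subscribe()
cherrypy.quickstart(Root())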
https://bitbucket.org/rpattinson/cherrypy/diff/cherrypy/process/plugins.py?diff2=13d698d4c824&at=cherrypy-3.2.x
The Oracle XDK includes components that help you to determine the differences between the contents of two XML documents and then to apply the differences (patch) to one of the XML documents. This chapter contains these topics: You can use Oracle XmlDiff to determine the differences between two similar XML documents. XmlDiff generates an Xdiff instance document that indicates the differences. The Xdiff instance document is an XML document that conforms to an XML schema, the Xdiff schema. You can then use XmlPatch, which takes the Xdiff instance document and applies the changes to other documents. This can be used to apply the same changes to a large number of XML documents. XmlDiff only supports the DOM API for input and output. XmlPatch also supports the DOM for the input and patch documents. XmlDiff and XmlPatch can be used through a C API or a command line tool, and they are exposed by two SQL functions. An XmlHash C API is provided to compute the hash value of an XML tree or subtree. If hash values of two trees or subtrees are equal, the trees are identical to a very high probability. The flow of the process is as follows: The two input documents are compared by XmlDiff. XmlDiff creates a Xdiff instance document. The application can pass the Xdiff instance document to XmlPatch, if this is required. XmlPatch can apply the differences captured from the comparison to other documents as specified by the application. XmlDiff compares the trees that represent the two input documents to determine differences. Both input documents must use the same character-set encoding. The Xdiff (output) instance document has the same encoding as the data encoding (DOM encoding) of the input documents. There are two options for the comparison, known as optimizations: Global Optimization - Default The whole document trees are compared. Local Optimization Comparison is at the sibling level. Local optimization compares siblings under the corresponding parents from two trees. Global optimization can take more time and space for large documents but always produces the smallest set of differences (the optimal difference). Local optimization is much faster, but may not produce the optimal difference. Hashing generally speeds up global optimization with a small possible loss in quality. Hashing improves the quality of the difference output, with local optimization. Using different hash levels may generate both local and global differences. You can specify the use of hashing for both local and global optimization. To specify hashing, provide the hashLevel parameter. If hashLevel is greater than 1, then only the DOMHash values are used for comparing all subtrees at depth >= hashLevel of difference. If the hash values are equal, then the subtrees are presumed to be equal. XmlDiff ignores differences in the order of attributes while doing the comparison. XmlDiff ignores DocType declarations. Files are not validated against the DTD. XmlDiff ignores any differences in the namespace prefixes as long as the namespace prefixes refer to the same namespace URI. Otherwise, if two nodes have the same local name and content but differ in namespace URI, these differences are indicated. Note: XmlDiffoperates on its input documents in a nonschema-based way. It does not operate on elements or attributes in a type-aware manner. Table 21-1 describes command line options: Example 21-1 is a sample xml document that can be used to explain updates resulting from using both XmlDiff and XmlPatch. It is followed by some hypothetical changes. 
<?xml version="1.0"?> <booklist xmlns=""> <book> <title>Twelve Red Herrings</title> <author>Jeffrey Archer</author> <publisher>Harper Collins</publisher> <price>7.99</price> </book> <book> <title language="English">The Eleventh Commandment</title> <author>Jeffrey Archer</author> <publisher>McGraw Hill</publisher> <price>3.99</price> </book> <book> <title language="English" country="USA">C++ Primer</title> <author>Lippmann</author> <publisher>Harper Collins</publisher> <price>4.99</price> </book> <book> <title>Emperor's New Mind</title> <author>Roger Penrose</author> <publisher>Oxford Publishing Company</publisher> <price>15.9</price> </book> <book> <title>Evening News</title> <author>Arthur Hailey</author> <publisher>MacMillan Publishers</publisher> <price>9.99</price> </book> </booklist> Assume that there is another file, book2.xml, that looks just like the Example 21-1, "book1.xml" except that it results in the following: Deletes "The Eleventh Commandment", a delete-node operation. Changes the country code for the "C++ Primer" to US from USA, an update-node operation. Adds a description to "Emperor's New Mind", an append-node operation. Add the edition to "Evening News", an insert-node-before operation. Updates the price of "Evening News", an update-node operation. This section shows the Xdiff instance document produced by the comparison of these two XML files described in the previous section. The sections that follow explain the XML processing instructions and the operations on this document. You can invoke XmlDiff as follows: > xmldiff book1.xml book2.xml You can also examine the sample application for arguments and flags. Example 21-2 Sample Xdiff Instance Document <?xml version="1.0" encoding="UTF-8"?> <xd:xdiff xsi: <?oracle-xmldiff <xd:update-node xd: <xd:content>US</xd:content> </xd:update-node> <xd:append-node xd: <xd:content> <oraxdfns_0:description> This is a classic </oraxdfns_0:description> </xd:content> </xd:append-node> <xd:insert-node-before xd: <xd:content> <oraxdfns_0:edition>Hardcover</oraxdfns_0:edition> </xd:content> </xd:insert-node-before> <xd:update-node xd: <xd:content>12.99</xd:content> </xd:update-node> </xd:xdiff> The Xdiff instance document uses some XML processing instructions (shown in bold in the previous section) that are used to represent certain aspects of the differencing process. See "Xdiff Schema". These instructions and related options are: operations-in-docorder: Options are true or false: true - The Xdiff instance document refers to the nodes from the first document in the same order as in the document. false - The Xdiff instance document does not refer to the nodes from the first document in the same order as in the document. The output of global optimization meets the operations-in-docorder requirement, but local optimization does not. output-model: Options are: snapshot - Xmldiff generates output in snapshot model and follows the UNIX diff model. Each operation uses XPath as if no operations have been applied to the input document. This is the default. XmlPatch can only handle this model if operations-in-docorder is set to true and the XPaths are simple. Simple XPaths require a child axis, no wild cards, and must use positional predicates, such as /root[1]/child[2]/text()[2]. current - Each operation uses XPath as if all operations up to the previous one have been applied to the input document. 
Even though XmlDiff does not generate differences in the current model, XmlPatch can handle a hand-crafted diff document in the current model diff-algorithm: Options indicate which optimization generated the differences. Global optimization Local optimization See Also:"User Options for Optimization" XmlDiff captures differences using operations indicated by the Xdiff instance document. Note the following about Xdiff operations: The parent-xpath attribute or xpath attribute specifies the XPATH location of the parent node of the node to be operated on or XPATH location of node. The node-type attribute specifies the type of the node to be operated on. The content child element specifies the new subtree or value appended or inserted. The Xdiff operations, presented in the Xdiff Instance Document, are as follows: append-node: The append-node element specifies that a node of the given type is added as the last child of the given parent. insert-node-before: The insert-node-before element specifies that a node of the given type is inserted before the given reference node. delete-node: The delete-node element specifies that the node be deleted along with all its children. This can be used to delete elements, comments, and so on. update-node: update-node specifies that the value associated with the node with the given XPath expression is updated to the new value, which is specified. Content is the value for a text node. The value of an attribute is the value for an attribute node. Update for Text Nodes: Generation of update node operations can be turned off by the user. The value of an attribute is the value for an attribute node. update-node is generated for text nodes only by global optimization. Update for Elements: XmlDiff does not generate update operations for element nodes. You can either manually modify the Xdiff instance document to create an update operation that works with XmlPatch, or provide a totally hand-written Xdiff instance document. All children of the element operated on by the update are deleted. Any new subtree specified under the content node is imported. The output of XmlDiff, the Xdiff instance document, is in XML format and conforms to the Xdiff schema shown in the next section. The output document contains a sequence of operations describing the differences between the two input documents. If you apply the differences from the first document, you get the second document. Example 21-3 shows the Xdiff schema to which the Xdiff instance document (output) adheres. Example 21-3 Xdiff Schema: xdiff.xsd <schema targetNamespace="" xmlns="" xmlns: <annotation> <documentation> Defines the structure of XML documents that capture the difference between two XML documents. Changes that are not supported by Oracle XmlDiff may not be expressible in this schema. 'oracle-xmldiff' PI in Xdiff document: We use 'oracle-xmldiff' PI to describe certain aspects of the diff. The PI denotes values for 'operations-in-docorder' and 'output-model'. The output of XmlDiff has the PI always. If the user hand-codes a diff doc then it must also have the PI in it as the first child of top level xdiff element, to be able to call XmlPatch. operations-in-docorder: Can be either 'true' or 'false'. If true, the operations in the diff document refer to the elements of the input doc in the same order as document order. Output of global algorithm meets this requirement while local does not. output-model: output models for representing the diff. Can be either 'Snapshot' or 'Current'. 
Snapshot model: Each operation uses Xpaths as if no operations have been applied to the input document. (like UNIX diff) This is the model used in the output of XmlDiff. XmlPatch works with this (and the current model too). For XmlPatch to handle this model, "operations-in-docorder" must be true and the Xpaths must be simple. (see XmlDif C API documentation). Current model: Each operation uses Xpaths as if all operations till the previous one have been applied to the input document. Works with XmlPatch even if the 'operations-in-docorder' criterion is not met and the xpaths are not simple. <!-- Example: <?oracle-xmldiff <restriction base="string"> <enumeration value="element"/> <enumeration value="attribute"/> <enumeration value="text"/> <enumeration value="cdata"/> <enumeration value="entity-reference"/> <enumeration value="entity"/> <enumeration value="processing-instruction"/> <enumeration value="notation"/> <enumeration value="comment"/> </restriction> </simpleType> <element name="xdiff"> <complexType> <choice minOccurs="0" maxOccurs="unbounded"> <element name="append-node"> <complexType> <sequence> <element name="content" type="anyType"/> </sequence> <attribute name="node-type" type="xd:xdiff-nodetype"/> <attribute name="xpath" type="string"/> <attribute name="parent-xpath" type="string"/> <attribute name="attr-local" type="string"/> <attribute name="attr-nsuri" type="string"/> </complexType> </element> <element name="insert-node-before"> <complexType> <sequence> <element name="content" type="anyType"/> </sequence> <attribute name="xpath" type="string"/> <attribute name="node-type" type="xd:xdiff-nodetype"/> </complexType> </element> <element name="delete-node"> <complexType> <attribute name="node-type" type="xd:xdiff-nodetype"/> <attribute name="xpath" type="string"/> <attribute name="parent-xpath" type="string"/> <attribute name="attr-local" type="string"/> <attribute name="attr-nsuri" type="string"/> </complexType> </element> <element name="update-node"> <complexType> <sequence> <element name="content" type="anyType"/> </sequence> <attribute name="node-type" type="xd:xdiff-nodetype"/> <attribute name="parent-xpath" type="string"/> <attribute name="xpath" type="string"/> <attribute name="attr-local" type="string"/> <attribute name="attr-nsuri" type="string"/> </complexType> </element> <element name="rename-node"> <complexType> <sequence> <element name="content" type="anyType"/> </sequence> <attribute name="xpath" type="string"/> <attribute name="node-type" type="xd:xdiff-nodetype"/> </complexType> </element> </choice> <attribute name="xdiff-version" type="string"/> </complexType> </element> </schema> In an application, XmlDiff takes the source types and locations of the input documents as arguments. The source type can be a URL, file, orastream and stream context pointers, buffer, and buffer_length pointers or the pointer to a DOM document element ( docelement). XmlDiff returns the document node for the DOM for the Xdiff instance document. XmlDiff builds the DOM for the two documents, if they are not already provided as DOM, before performing a comparison. See Also:Oracle Database XML C API Reference, for the C API for the flags that control the behavior of XmlDiff Example 21-4 XMLDiff Application # include <xmldf.h> ... 
xmlctx *xctx; xmldocnode *doc1, *doc2, *doc3; uword hash_level; oratex *s, *inp1 = "book1.xml", *inp2="book2.xml"; xmlerr err; ub4 flags; flags = 0; /* defaults : global algorithm */ hash_level = 0; /* no hashing */ /* create XML meta context */ if (!(xctx = XmlCreate(&err, (oratext *) "XmlDiff",."); } /* run XmlDiff on the DOM trees. */ doc3 = XmlDiff(xctx, &err, flags, XMLDF_SRCT_DOM, doc1, NULL, XMLDF_SRCT_DOM, doc2, NULL,hash_level, NULL); if(!doc3) printf("XmlDiff Failed, error %u\n", (unsigned)err); else { if(err != XMLERR_OK) printf("XmlDiff returned error %u\n", (unsigned)err); /* Now we have the DOM tree in doc3 which represent the Diff */ ... } XmlFreeDocument(xctx, doc1); XmlFreeDocument(xctx, doc2); XmlFreeDocument(xctx, doc3); XmlDestroy(xctx); A customized output builder stores differences in any format suitable to the application. You can create your own customized output builder, rather than using the default Xdiff instance document, generated by XmlDiff and which conforms to the Xdiff schema. To do this, you must provide a callback that can be called after XmlDiff determines the differences. The differences are passed to the callback as an array of xmdlfop. The callback may be called multiple times as the differences are being generated. Using a customized output builder may perform better than using the default, because it does not have to maintain the internal state necessary for XPath generation. By default, XmlDiff captures the differences in XML conforming to Xdiff schema. If necessary, plug in your own output builder. The differences are represented as an array xmldfop. You must write an output builder callback function. The function signature is: xmlerr(*xdfobcb)(void *uctx, xmldfop *escript, ub4 escript_siz); uctx is the user specific context. escript is the array of size escript_siz: diff[escript_siz] mctx is the memory context. Supply this memory context through properties to XmlDiff(). Use this memory context to allocate escript. You must later free escript. Invoke the output builder callback after the differences have been found which happens even before the call to XmlDiff() returns. The output builder callback can be called multiple times. Example 21-5 Customized XMLDiff Output /* Sample useage: */ ... #include <orastruc.h> / * for 'oraprop' * / ... static oraprop diff_props[] = { ORAPROP(XMLDF_PROPN_CUSTOM_OB, XMLDF_PROPI_CUSTOM_OB, POINTER), ORAPROP(XMLDF_PROPN_CUSTOM_OBMCX, XMLDF_PROPI_CUSTOM_OBMCX, POINTER), ORAPROP(XMLDF_PROPN_CUSTOM_OBUCX, XMLDF_PROPI_CUSTOM_OBUCX, POINTER), { NULL } }; ... oramemctx *mymemctx; ... xmlerr myob(void *uctx, xmldfop *escript, ub4 escript_siz) { /* process diff which is available in escript * / /* free escript - the caller has to do this * / OraMemFree(mymemctx, escript); } main() { ... myctxt *myctx; diff_props[0].value_oraprop.p_oraprop_v = myob; diff_props[1].value_oraprop.p_oraprop_v = mymemctx; diff_props[2].value_oraprop.p_oraprop_v = myctx; XmlDiff(xctx, &err, 0, doc1, NULL, 0, doc2, NULL, 0, diff_props); ... } XmlPatch takes the Xdiff instance document, either as generated by XmlDiff or created by another mechanism, and follows the instructions in the Xdiff instance document to modify other XML documents as specified. Using the XmlPatch Command Line Utility Using XmlPatch in an Application Table 21-2 describes the XmlPatch command line options: XmlPatch takes the source types and locations of the input document and the diff document as arguments. 
The source type can be a URL, file, orastream and stream context pointers, buffer and buffer_length pointers, or the pointer to a DOM document element ( docelement). See Also:Oracle Database XML C API Reference, for the C API for the flags that control the behavior of XmlPatch The modes that were set by the Xdiff schema affect how XmlPatch works. If the output-model is Snapshot, XmlPatch only works if operations-in-docorder is TRUE. If the output-model is Current, it is not necessary that operations-in-docorder be set to TRUE. Example 21-6 Sample Application for XmlPatch ... #include <xmldf.h> ... xmlctx *xctx; xmldocnode *doc1, *doc2; oratext *s; oratext *inp1 = "book1.xml"; /* input document */ oratext *inp2 = "diff.xml", /* diff document */ xmlerr err; /* create XML meta context */ if (!(xctx = XmlCreate(&err, (oratext *) "XmlPatch",."); } /* call XmlPatch */ if(!XmlPatch(xctx, &err, 0, XMLDF_SRCT_DOM, doc1, NULL, XMLDF_SRCT_DOM, doc2, NULL, NULL)); printf("XmlPatch Failed, error %u\n", (unsigned)err); else { if(err != XMLERR_OK) printf("XmlPatch returned error %u\n", (unsigned)err); /* Now we have the patched document in doc1 */ ... } XmlFreeDocument(xctx, doc1); XmlFreeDocument(xctx, doc2); XmlDestroy(xctx); Oracle XDK provides XmlHash, which computes a hash value for an XML tree or subtree. If the hash values of two subtrees are equal, it is highly probable that they are the same XML. This can be used to do a quick comparison, for example, if you want to see if the XML tree is already in the database. You can run XmlDiff again, if necessary, on any matches, to be absolutely certain there is a match. You can compute the hash value of the new document and query the database for it. Example 21-7 shows a sample program that uses XmlHash. Example 21-7 XmlHash Program sword main(sword argc, char *argv[]) { xmlctx *xctx; xmldfsrct srct; oratext *data_encoding, *input_encoding, *s, *inp1; ub1 flags; xmlerr err; ub4 num_args; xmlhasht digest; flags = 0; /* defaults */ srct = XMLDF_SRCT_FILE; inp1 = "somexml.xml"; xctx = XmlCreate(&err, (oratext *) "XmlHash", NULL); if (!xctx) { /* handle error with creating xml context and exit */ ... } /* run XmlHash */ err = XmlHash(xctx, &digest, 0, srct, inp1, NULL, NULL); if(err) printf("XmlHash returned error:%d \n", err); else txdfha_pd(digest); XmlDestroy(xctx); return (sword )err; } /* print bytes in xml hash */ static void txdfha_pd(xmlhasht digest) { ub4 i; for(i = 0; i < digest.l_xmlhasht; i++) printf("%x ", digest.d_xmlhasht[i]); printf("\n"); } XmlDiff and XmlPatch can be called as command line tools and from the C language. They are also available as SQL functions. See Also: Oracle Database SQL Language Reference, XMLDiff Oracle Database SQL Language Reference, XMLPatch
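As a pointer for the SQL route mentioned above, here is a minimal sketch of calling the two SQL functions from SQL*Plus. It assumes an Oracle release (11g or later) in which XMLDIFF and XMLPATCH accept XMLType arguments and return an XMLType; the literal documents and the diffs table are made up for illustration, and the exact shape of the generated diff depends on the database version.

-- Compute a diff between two small documents.
SELECT XMLDIFF(
         XMLTYPE('<booklist><book><price>7.99</price></book></booklist>'),
         XMLTYPE('<booklist><book><price>8.99</price></book></booklist>')
       ) AS diff_doc
  FROM dual;

-- Apply a previously computed diff (here assumed to be stored in a table column).
SELECT XMLPATCH(
         XMLTYPE('<booklist><book><price>7.99</price></book></booklist>'),
         d.diff_doc
       ) AS patched_doc
  FROM diffs d
 WHERE d.id = 1;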
http://docs.oracle.com/cd/E12839_01/appdev.1111/b28394/adx_c_diff.htm
Hi/2. Diego Biurrun wrote: > On Mon, Sep 17, 2007 at 04:41:34PM -0700, Dave Yeo wrote: > >> Reimar Doeffinger wrote: >> >>> On Mon, Sep 17, 2007 at 12:35:46PM +0200, Diego Biurrun wrote: >>> >>>> Even more important: Reimar came up with a header file that provides the >>>> correct definition. So why can't this be used instead of adding this >>>> (possibly brittle as explained by Mans) check? >>>> >>> I'm not sure that is a proper header file, at least it's not a system >>> one... >>> But it seems that _socklen_t is in some header file, maybe using that is >>> good enough? >>> >> Grepping include I found in <386/ansi.h> >> /* >> * Types which are fundamental to the implementation and must be declared >> * in more than one standard header are defined here. Standard headers >> * then use: >> * #ifdef _BSD_SIZE_T_ >> * typedef _BSD_SIZE_T_ size_t; >> * #undef _BSD_SIZE_T_ >> * #endif >> */ >> ... >> #define _BSD_SOCKLEN_T_ __uint32_t /* socklen_t (duh) */ >> ... >> >> And in <sys/_type.h> >> ... >> typedef __uint32_t __socklen_t; >> ... >> So it seems that it should be uint32_t. Also __uint32_t is typedef as >> unsigned int in <386/_types.h>. >> > > I get to repeat my question: Is there a way to include this header file > instead of adding the check from the patch? > > That header file doesn't include 'socklen_t' but '__socklen_t'. So we need not to include it for 'socklen_t'. -- KO Myung-Hun Using Mozilla SeaMonkey 1.1.4 Under OS/2 Warp 4 for Korean with FixPak #15 On AMD ThunderBird 750 MHz with 512 MB RAM Korean OS/2 User Community :
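For readers following along, the fallback being debated would look something like the sketch below: keep using socklen_t in the code and only supply a typedef when the platform's headers don't provide one. The HAVE_SOCKLEN_T guard is illustrative of what a configure-time check might define; it is not necessarily the macro FFmpeg's configure actually uses.

/* Hypothetical fallback, gated on a configure-style check. On the OS/2
 * headers quoted above, __socklen_t is __uint32_t (unsigned int), so a
 * 32-bit unsigned typedef matches the system ABI. */
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>

#ifndef HAVE_SOCKLEN_T
typedef uint32_t socklen_t;
#endif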
http://ffmpeg.org/pipermail/ffmpeg-devel/2007-September/039280.html
django-taggable

django-taggable is a library that implements efficient tagging for the Django Web Framework 1.1+, written by Gustavo Picón and licensed under the Apache License 2.0.

django-taggable is:

- Flexible: Uses tagtools to choose between popular tagging styles (flickr, delicious, etc.), or define your own. You can also easily have several tag fields per object, or have different tag "namespaces" to be used between one, some, or all your taggable objects. Your project, your choice.
- Fast: No GenericForeignKey madness.
- Easy: Uses Django Model Inheritance with abstract classes to define your own models (a sketch of the pattern follows below). The API isn't "magical".
- Clean: Testable and well tested code base. Code/branch test coverage is 100%.
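A rough idea of the abstract-class pattern referenced in the "Easy" point, using plain Django model inheritance. The class and field names here are purely illustrative of the approach; they are not django-taggable's actual API, for which see the library's documentation.

from django.db import models

class BaseTag(models.Model):
    # Abstract base: no table of its own; each concrete subclass gets its
    # own tag table, so no generic foreign keys are needed.
    name = models.CharField(max_length=100, unique=True)

    class Meta:
        abstract = True

class PhotoTag(BaseTag):
    # Concrete tag model for one app, with its own database table.
    pass

class Photo(models.Model):
    title = models.CharField(max_length=200)
    tags = models.ManyToManyField(PhotoTag, related_name='photos')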
https://bitbucket.org/tabo/django-taggable/src/110e0e2a61197fde7f5737e5ddfc8c1a48a5b154/docs/index.rst
A while back Peter Norvig posted a wonderful pair of articles about regex golf. The idea behind regex golf is to come up with the shortest possible regular expression that matches one given list of strings, but not the other. “Regex Golf,” by Randall Munroe. In the first article, Norvig runs a basic algorithm to recreate and improve the results from the comic, and in the second he beefs it up with some improved search heuristics. My favorite part about this topic is that regex golf can be phrased in terms of a problem called set cover. I noticed this when reading the comic, and was delighted to see Norvig use that as the basis of his algorithm. The set cover problem shows up in other places, too. If you have a database of items labeled by users, and you want to find the smallest set of labels to display that covers every item in the database, you’re doing set cover. I hear there are applications in biochemistry and biology but haven’t seen them myself. If you know what a set is (just think of the “set” or “hash set” type from your favorite programming language), then set cover has a simple definition. Definition (The Set Cover Problem): You are given a finite set called a “universe” and sets each of which is a subset of . You choose some of the to ensure that every is in one of your chosen sets, and you want to minimize the number of you picked. It’s called a “cover” because the sets you pick “cover” every element of . Let’s do a simple. Let and Then the smallest possible number of sets you can pick is 2, and you can achieve this by picking both or both . The connection to regex golf is that you pick to be the set of strings you want to match, and you pick a set of regexes that match some of the strings in but none of the strings you want to avoid matching (I’ll call them ). If is such a regex, then you can form the set of strings that matches. Then if you find a small set cover with the strings , then you can “or” them together to get a single regex that matches all of but none of . Set cover is what’s called NP-hard, and one implication is that we shouldn’t hope to find an efficient algorithm that will always give you the shortest regex for every regex golf problem. But despite this, there are approximation algorithms for set cover. What I mean by this is that there is a regex-golf algorithm that outputs a subset of the regexes matching all of , and the number of regexes it outputs is such-and-such close to the minimum possible number. We’ll make “such-and-such” more formal later in the post. What made me sad was that Norvig didn’t go any deeper than saying, “We can try to approximate set cover, and the greedy algorithm is pretty good.” It’s true, but the ideas are richer than that! Set cover is a simple example to showcase interesting techniques from theoretical computer science. And perhaps ironically, in Norvig’s second post a header promised the article would discuss the theory of set cover, but I didn’t see any of what I think of as theory. Instead he partially analyzes the structure of the regex golf instances he cares about. This is useful, but not really theoretical in any way unless he can say something universal about those instances. I don’t mean to bash Norvig. His articles were great! And in-depth theory was way beyond scope. So this post is just my opportunity to fill in some theory gaps. We’ll do three things: - Show formally that set cover is NP-hard. - Prove the approximation guarantee of the greedy algorithm. 
- Show another (very different) approximation algorithm based on linear programming. Along the way I’ll argue that by knowing (or at least seeing) the details of these proofs, one can get a better sense of what features to look for in the set cover instance you’re trying to solve. We’ll also see how set cover depicts the broader themes of theoretical computer science. NP-hardness The first thing we should do is show that set cover is NP-hard. Intuitively what this means is that we can take some hard problem and encode instances of inside set cover problems. This idea is called a reduction, because solving problem will “reduce” to solving set cover, and the method we use to encode instance of as set cover problems will have a small amount of overhead. This is one way to say that set cover is “at least as hard as” . The hard problem we’ll reduce to set cover is called 3-satisfiability (3-SAT). In 3-SAT, the input is a formula whose variables are either true or false, and the formula is expressed as an OR of a bunch of clauses, each of which is an AND of three variables (or their negations). This is called 3-CNF form. A simple example: The goal of the algorithm is to decide whether there is an assignment to the variables which makes the formula true. 3-SAT is one of the most fundamental problems we believe to be hard and, roughly speaking, by reducing it to set cover we include set cover in a class called NP-complete, and if any one of these problems can be solved efficiently, then they all can (this is the famous P versus NP problem, and an efficient algorithm would imply P equals NP). So a reduction would consist of the following: you give me a formula in 3-CNF form, and I have to produce (in a way that depends on !) a universe and a choice of subsets in such a way that has a true assignment of variables if and only if the corresponding set cover problem has a cover using sets. In other words, I’m going to design a function from 3-SAT instances to set cover instances, such that is satisfiable if and only if has a set cover with sets. Why do I say it only for sets? Well, if you can always answer this question then I claim you can find the minimum size of a set cover needed by doing a binary search for the smallest value of . So finding the minimum size of a set cover reduces to the problem of telling if theres a set cover of size . Now let’s do the reduction from 3-SAT to set cover. If you give me where each is a clause and the variables are denoted , then I will choose as my universe to be the set of all the clauses and indices of the variables (these are all just formal symbols). i.e. The first part of will ensure I make all the clauses true, and the last part will ensure I don’t pick a variable to be both true and false at the same time. To show how this works I have to pick my subsets. For each variable , I’ll make two sets, one called and one called . They will both contain in addition to the clauses which they make true when the corresponding literal is true (by literal I just mean the variable or its negation). For example, if uses the literal , then will contain but will not. Finally, I’ll set , the number of variables. Now to prove this reduction works I have to prove two things: if my starting formula has a satisfying assignment I have to show the set cover problem has a cover of size . Indeed, take the sets for all literals that are set to true in a satisfying assignment. 
There can be at most true literals since half are true and half are false, so there will be at most sets, and these sets clearly cover all of because every literal has to be satisfied by some literal or else the formula isn’t true. The reverse direction is similar: if I have a set cover of size , I need to use it to come up with a satisfying truth assignment for the original formula. But indeed, the sets that get chosen can’t include both a and its negation set , because there are of the elements , and each is only in the two . Just by counting if I cover all the indices , I already account for sets! And finally, since I have covered all the clauses, the literals corresponding to the sets I chose give exactly a satisfying assignment. Whew! So set cover is NP-hard because I encoded this logic problem 3-SAT within its rules. If we think 3-SAT is hard (and we do) then set cover must also be hard. So if we can’t hope to solve it exactly we should try to approximate the best solution. The greedy approach The method that Norvig uses in attacking the meta-regex golf problem is the greedy algorithm. The greedy algorithm is exactly what you’d expect: you maintain a list of the subsets you’ve picked so far, and at each step you pick the set that maximizes the number of new elements of that aren’t already covered by the sets in . In python pseudocode: def greedySetCover(universe, sets): chosenSets = set() leftToCover = universe.copy() unchosenSets = sets covered = lambda s: leftToCover & s while universe != 0: if len(chosenSets) == len(sets): raise Exception("No set cover possible") nextSet = max(unchosenSets, key=lambda s: len(covered(s))) unchosenSets.remove(nextSet) chosenSets.add(nextSet) leftToCover -= nextSet return chosenSets This is what theory has to say about the greedy algorithm: Theorem: If it is possible to cover by the sets in , then the greedy algorithm always produces a cover that at worst has size , where is the size of the smallest cover. Moreover, this is asymptotically the best any algorithm can do. One simple fact we need from calculus is that the following sum is asymptotically the same as : Proof. [adapted from Wan] Let’s say the greedy algorithm picks sets in that order. We’ll set up a little value system for the elements of . Specifically, the value of each is 1, and in step we evenly distribute this unit value across all newly covered elements of . So for each covered element gets value , and if covers four new elements, each gets a value of 1/4. One can think of this “value” as a price, or energy, or unit mass, or whatever. It’s just an accounting system (albeit a clever one) we use to make some inequalities clear later. In general call the value of element the value assigned to at the step where it’s first covered. In particular, the number of sets chosen by the greedy algorithm is just . We’re just bunching back together the unit value we distributed for each step of the algorithm. Now we want to compare the sets chosen by greedy to the optimal choice. Call a smallest set cover . Let’s stare at the following inequality. It’s true because each counts for a at most once in the left hand side, and in the right hand side the sets in must hit each at least once but may hit some more than once. Also remember the left hand side is equal to . Now we want to show that the inner sum on the right hand side, , is at most . This will in fact prove the entire theorem: because each set has size at most , the inequality above will turn into And so , which is the statement of the theorem. 
So we want to show that . For each define to be the number of elements in not covered in . Notice that is the number of elements of that are covered for the first time in step . If we call the smallest integer for which , we can count up the differences up to step , we get The rightmost term is just the cost assigned to the relevant elements at step . Moreover, because covers more new elements than (by definition of the greedy algorithm), the fraction above is at most . The end is near. For brevity I’ll drop the from . And that proves the claim. I have three postscripts to this proof: - This is basically the exact worst-case approximation that the greedy algorithm achieves. In fact, Petr Slavik proved in 1996 that the greedy gives you a set of size exactly in the worst case. - This is also the best approximation that any set cover algorithm can achieve, provided that P is not NP. This result was basically known in 1994, but it wasn’t until 2013 and the use of some very sophisticated tools that the best possible bound was found with the smallest assumptions. - In the proof we used that to bound things, but if we knew that our sets (i.e. subsets matched by a regex) had sizes bounded by, say, , the same proof would show that the approximation factor is instead of . However, in order for that to be useful you need to be a constant, or at least to grow more slowly than any polynomial in , since e.g. . In fact, taking a second look at Norvig’s meta regex golf problem, some of his instances had this property! Which means the greedy algorithm gives a much better approximation ratio for certain meta regex golf problems than it does for the worst case general problem. This is one instance where knowing the proof of a theorem helps us understand how to specialize it to our interests. Norvig’s frequency table for president meta-regex golf. The left side counts the size of each set (defined by a regex) The linear programming approach So we just said that you can’t possibly do better than the greedy algorithm for approximating set cover. There must be nothing left to say, job well done, right? Wrong! Our second analysis, based on linear programming, shows that instances with special features can have better approximation results. In particular, if we’re guaranteed that each element occurs in at most of the sets , then the linear programming approach will give a -approximation, i.e. a cover whose size is at worst larger than OPT by a multiplicative factor of . In the case that is constant, we can beat our earlier greedy algorithm. The technique is now a classic one in optimization, called LP-relaxation (LP stands for linear programming). The idea is simple. Most optimization problems can be written as integer linear programs, that is there you have variables and you want to maximize (or minimize) a linear function of the subject to some linear constraints. The thing you’re trying to optimize is called the objective. While in general solving integer linear programs is NP-hard, we can relax the “integer” requirement to , or something similar. The resulting linear program, called the relaxed program, can be solved efficiently using the simplex algorithm or another more complicated method. The output of solving the relaxed program is an assignment of real numbers for the that optimizes the objective function. 
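To make the relax-and-round recipe concrete before analyzing it, here is a small sketch for set cover that uses scipy's linprog as the LP solver (the choice of solver is mine, not part of the original post); the rounding rule is exactly the one analyzed later in this section.

import numpy as np
from scipy.optimize import linprog

def lp_round_set_cover(universe, sets):
    elements = list(universe)
    # One constraint per element e: sum of x_j over sets containing e >= 1,
    # rewritten as -(sum ...) <= -1 to fit linprog's A_ub @ x <= b_ub form.
    A_ub = np.array([[-1.0 if e in s else 0.0 for s in sets] for e in elements])
    b_ub = -np.ones(len(elements))
    c = np.ones(len(sets))                 # minimize the number of sets chosen
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(0.0, 1.0)] * len(sets))
    # f = the largest number of sets any single element belongs to.
    f = max(sum(1 for s in sets if e in s) for e in elements)
    # Deterministic rounding: keep every set whose relaxed variable is >= 1/f.
    return [s for x, s in zip(res.x, sets) if x >= 1.0 / f - 1e-9]

cover = lp_round_set_cover({1, 2, 3, 4}, [{1, 2}, {3, 4}, {1, 3}, {2, 4}])

With that picture in mind, the rest of this section shows the rounded solution really is a cover and costs at most f times the optimum.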
A key fact is that the solution to the relaxed linear program will be at least as good as the solution to the original integer program, because the optimal solution to the integer program is a valid candidate for the optimal solution to the linear program. Then the idea is that if we use some clever scheme to round the to integers, we can measure how much this degrades the objective and prove that it doesn’t degrade too much when compared to the optimum of the relaxed program, which means it doesn’t degrade too much when compared to the optimum of the integer program as well. If this sounds wishy washy and vague don’t worry, we’re about to make it super concrete for set cover. We’ll make a binary variable for each set in the input, and if and only if we include it in our proposed cover. Then the objective function we want to minimize is . If we call our elements , then we need to write down a linear constraint that says each element is hit by at least one set in the proposed cover. These constraints have to depend on the sets , but that’s not a problem. One good constraint for element is In words, the only way that an will not be covered is if all the sets containing it have their . And we need one of these constraints for each . Putting it together, the integer linear program is Once we understand this formulation of set cover, the relaxation is trivial. We just replace the last constraint with inequalities. For a given candidate assignment to the , call the objective value (in this case ). Now we can be more concrete about the guarantees of this relaxation method. Let be the optimal value of the integer program and a corresponding assignment to achieving the optimum. Likewise let be the optimal things for the linear relaxation. We will prove: Theorem: There is a deterministic algorithm that rounds to integer values so that the objective value , where is the maximum number of sets that any element occurs in. So this gives a -approximation of set cover. Proof. Let be as described in the theorem, and call to make the indexing notation easier. The rounding algorithm is to set if and zero otherwise. To prove the theorem we need to show two things hold about this new candidate solution : - The choice of all for which covers every element. - The number of sets chosen (i.e. ) is at most times more than . Since , so if we can prove number 2 we get , which is the theorem. So let’s prove 1. Fix any and we’ll show that element is covered by some set in the rounded solution. Call the number of times element occurs in the input sets. By definition , so . Recall was the optimal solution to the relaxed linear program, and so it must be the case that the linear constraint for each is satisfied: . We know that there are terms and they sums to at least 1, so not all terms can be smaller than (otherwise they’d sum to something less than 1). In other words, some variable in the sum is at least , and so is set to 1 in the rounded solution, corresponding to a set that contains . This finishes the proof of 1. Now let’s prove 2. For each , we know that for each , the corresponding variable . In particular . Now we can simply bound the sum. The second inequality is true because some of the are zero, but we can ignore them when we upper bound and just include all the . This proves part 2 and the theorem. I’ve got some more postscripts to this proof: - The proof works equally well when the sets are weighted, i.e. your cost for picking a set is not 1 for every set but depends on some arbitrarily given constants . 
- We gave a deterministic algorithm rounding to , but one can get the same result (with high probability) using a randomized algorithm. The idea is to flip a coin with bias roughly times and set if and only if the coin lands heads at least once. The guarantee is no better than what we proved, but for some other problems randomness can help you get approximations where we don’t know of any deterministic algorithms to get the same guarantees. I can’t think of any off the top of my head, but I’m pretty sure they’re out there. - For step 1 we showed that at least one term in the inequality for would be rounded up to 1, and this guaranteed we covered all the elements. A natural question is: why not also round up at most one term of each of these inequalities? It might be that in the worst case you don’t get a better guarantee, but it would be a quick extra heuristic you could use to post-process a rounded solution. - Solving linear programs is slow. There are faster methods based on so-called “primal-dual” methods that use information about the dual of the linear program to construct a solution to the problem. Goemans and Williamson have a nice self-contained chapter on their website about this with a ton of applications. Additional Reading Williamson and Shmoys have a large textbook called The Design of Approximation Algorithms. One problem is that this field is like a big heap of unrelated techniques, so it’s not like the book will build up some neat theoretical foundation that works for every problem. Rather, it’s messy and there are lots of details, but there are definitely diamonds in the rough, such as the problem of (and algorithms for) coloring 3-colorable graphs with “approximately 3″ colors, and the infamous unique games conjecture. I wrote a post a while back giving conditions which, if a problem satisfies those conditions, the greedy algorithm will give a constant-factor approximation. This is much better than the worst case -approximation we saw in this post. Moreover, I also wrote a post about matroids, which is a characterization of problems where the greedy algorithm is actually optimal. Set cover is one of the main tools that IBM’s AntiVirus software uses to detect viruses. Similarly to the regex golf problem, they find a set of strings that occurs source code in some viruses but not (usually) in good programs. Then they look for a small set of strings that covers all the viruses, and their virus scan just has to search binaries for those strings. Hopefully the size of your set cover is really small compared to the number of viruses you want to protect against. I can’t find a reference that details this, but that is understandable because it is proprietary software. Until next time!
http://jeremykun.com/category/optimization-2/
YATG::Store::Memcached - Back-end module to store polled data to a Memcached version 5.140510 This module implements part of a callback handler used to store SNMP data into a memcached service. It will be faster than storing to disk, and so is recommended if you do not require historical data. The module will die if it cannot connect to your memcached server, so see below for the configuration guidelines. Note that all example keys here use the namespace prefix of yatg: although this is configurable. One data structure is passed in, which represents a set of results for a set of polled OIDs on some devices. It looks a bit like this: $results->{$device}->{$leaf}->{$port} = {value} In your memcached server, a few well-known keys store lists of polled devices and so on, to help you bootstrap to find stored results. The key yatg:yatg_devices will contain an array reference containing all device IPs provided in the results data. Further, each key of the form yatg:ports_for:$device will contain an array reference containing all ports polled on that device. The port name is not munged in any way. The "port" entity might in fact just be an index value, or 1 if this OID is not Interface Indexed. Finally, the result of a poll is stored in memcached with a key of the following format: yatg:$device:$leaf:$port Note that the $leaf is the SNMP leaf name and not the OID. That key will be munged to remove whitespace, as that is not permitted in memcached keys. All of the above values are stored with a TTL of the polling interval as gathered from the main yatg_updater configuration. With all this information it is possible to write a script to find all the data stored in the memcache using the two lookup tables and then retrieving the desired keys. There is an example of this in the examples/ folder of this distribution, called check_interfaces. It is a Nagios2 check script. Install the following additonal module to use this plugin: In the main yatg_updater configuration, you must provide details of the location of your memcached server. Follow the example ( yatg.yml) file in this distribution. Remember you can override the namespace used from the default of yatg:, like so: cache_memcached: namespace: 'my_space:' Oliver Gorwits <[email protected]> This software is copyright (c) 2014 by University of Oxford. This is free software; you can redistribute it and/or modify it under the same terms as the Perl 5 programming language system itself.
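For a concrete picture of the key layout described above, here is a minimal reader-side sketch. It uses Cache::Memcached as the client (my choice for the example, not necessarily the client YATG itself uses); the server address and the ifOperStatus leaf are illustrative, and the namespace should match your yatg.yml.

#!/usr/bin/perl
use strict;
use warnings;
use Cache::Memcached;

# The namespace option prepends 'yatg:' to every key, matching the
# default prefix described above.
my $memd = Cache::Memcached->new({
    servers   => ['127.0.0.1:11211'],
    namespace => 'yatg:',
});

my $devices = $memd->get('yatg_devices') || [];            # yatg:yatg_devices
for my $device (@$devices) {
    my $ports = $memd->get("ports_for:$device") || [];     # yatg:ports_for:$device
    for my $port (@$ports) {
        my $value = $memd->get("$device:ifOperStatus:$port");  # yatg:$device:$leaf:$port
        printf "%s %s ifOperStatus: %s\n", $device, $port,
               defined $value ? $value : 'expired';
    }
}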
http://search.cpan.org/~oliver/YATG-5.140510/lib/YATG/Store/Memcached.pm
CC-MAIN-2015-32
en
refinedweb
At PDC 2009 we (RIA Services Team) announced the re-branding of Microsoft .NET RIA Services to Windows Communication Foundation (WCF) RIA Services. We backed this branding change with a new Preview release that has RIA Services built heavily on the WCF stack. In this post I plan to talk briefly about the motivation behind aligning RIA Services with WCF and then dig deep into how exactly RIA Services consumes WCF. RIA Services on WCF: Best of both worlds Ever since we announced RIA Services at MIX '09, we have heard strong customer feedback that they would like a consolidated services story from Microsoft. Acting on that feedback, over the last few months RIA Services has spent a significant amount of effort aligning closely with WCF. The Data Services team at their end has been working on a similar alignment with WCF as well. By centering all our service offerings around WCF we are maximizing developer knowledge transfer and skill reuse, both in the short and the long term. For more details on the alignment and the motivation behind it please also check out this post by the WCF team. I strongly believe that with WCF RIA Services our users get the best of both worlds - Understanding RIA Services use of WCF To help users better understand how RIA Services uses WCF, in the following section I walk through what happens under the covers when a user creates a simple DomainService and then communicates with it. 1. ‘Add new DomainService Class’ Let’s assume an application developer opens up the ‘Add new Domain Service Class’ item template and adds a new Domain Service. The item template, besides producing a skeletal Domain Service Class, adds the right assembly references and registers an HTTP module in the Web.Config. By default it registers an httpModule for Cassini (for the Visual Studio F5 experience) and one for IIS, as shown below. <?xml version="1.0"?> <configuration> <system.web> <httpModules> <add name="DomainServiceModule" type="System.Web.Ria.Services.DomainServiceHttpModule, System.Web.Ria" /> </httpModules> <compilation debug="true" targetFramework="4.0" /> </system.web> <system.webServer> <validation validateIntegratedModeConfiguration="false"/> <modules runAllManagedModulesForAllRequests="true"> <add name="DomainServiceModule" preCondition="managedHandler" type="System.Web.Ria.Services.DomainServiceHttpModule, System.Web.Ria" /> </modules> <validation validateIntegratedModeConfiguration="false" /> </system.webServer> </configuration> 2. Domain Service Code The developer then goes ahead and adds business logic to his DomainService. The methods exposed via the DomainService can be broken into two broad categories – CRUD operations – Query, Update, Named Update and Delete operations. These operations follow the RIA Services prescriptive guideline and rely on the RIA Services framework. This is added functionality that RIA Services introduces on top of WCF and is not available to Core WCF Services. Service Operations / Invoke Operations - These are [Invoke] operations in the RIA Services terminology and Service Operations in WCF terminology. These methods are independent of the RIA Services concept of ChangeSet (ChangeSet applies only to the CRUD operations above) and are ‘Online/Direct’ methods that communicate with the Server immediately when invoked. Below is the code for the OrganizationService DomainService we use in our canonical RIA Services walkthrough.
namespace HRApp.Web { [EnableClientAccess()] public class OrganizationService : LinqToEntitiesDomainService<AdventureWorks_DataEntities> { #region CRUD public IQueryable<Employee> GetEmployee(){…} public void InsertEmployee(Employee employee) {…} public void UpdateEmployee(Employee currentEmployee) {…} public void DeleteEmployee(Employee employee) {…} public IQueryable<Employee> GetSalariedEmployee(){…} [RequiresAuthentication()] public void ApproveSabbatical(Employee current) {…} #endregion #region ServiceOperations public string Echo(string msg) {…} public DateTime GetServerDateTime(){…} #endregion } } 3. WCF Channel on Client The RIA Services Framework on the Client contains a WebDomainClient:DomainClient whose purpose is to help the SL client communicate with a WCF service representation of the Domain Service. The WebDomainClient uses a WCF client channel for this communication. The Channel is created by using WCF’s ChannelFactory and by passing to it a WCF Service Contract that was generated from the Domain Service (more on the contract creation later). The ChannelFactory creates a WCF client proxy based on the supplied contract. The generated proxy takes care of communication/(de)serialization between Client and Server. The RIA Services DomainContext utilizes the WebDomainClient for Client-Server communication and the context itself is WCF agnostic. 4. Dynamic .SVC generation. Below is what the dynamically generated svc file looks like for the OrganizationService defined above - <%@ ServiceHost Service="HRApp.Web.OrganizationService" Factory="System.Web.Ria.DomainServiceHostFactory" %> The .SVC refers to the DomainService Type and a ServiceHostFactory. The default RIA Services HostFactory instantiates the default RIA Services ServiceHost, which in turn is responsible for extracting the WCF Service Contract from the DomainService (See the ‘Generating a WCF Contract’ paragraph below for more details) and also for hosting the service. 5. Generating the WCF Contract: Each WCF service needs to have a ServiceDescription and one or more ContractDescriptions for it. ServiceHosts are responsible for extracting descriptions from a service and hosting the service. The standard ServiceHost in WCF produces descriptions based on WCF attributes such as [ServiceContract] and [OperationContract]. In RIA Services we provide a custom ServiceHost which does this based on RIA Services attributes and conventions. Here is how DomainService operations are mapped to the WCF Contract: Query operations – Each Query operation shows up as a ServiceOperation in the WCF Contract, but with its signature modified. The ServiceHost creates an operation description for each query operation. The return type is changed to QueryResult<T> such that it can return additional information such as count. It also adds a QueryOperationBehavior which through a custom operation Invoker takes care of applying cache policies, validating parameters and composing queries. Insert, Update, Delete operations – For all CUD operations the ServiceHost generates one top-level SubmitChanges operation description. Again, it adds a custom behavior which injects a custom operation invoker. The signature of SubmitChanges is ChangeSet SubmitChanges(ChangeSet changeSet). The returned ChangeSet contains auto-generated values from the server. Invoke Operations – Invoke Operations are the RIA Services equivalent of WCF ServiceOperations. All Invoke Operations show up in the WCF Contract as Service Operations.
For the OrganizationService Domain Service we had defined earlier, below is what the WCF Contract looks like to the “Add Service Reference” dialog - Attached to this post is also the full generated WCF Contract for the OrganizationService (the file was produced by doing an Add Service Reference to the Domain Service's WCF endpoint) 6. Default Endpoints: The RIA Services ServiceHost creates the following endpoints by default - a) For Silverlight Client: SOAP w/binary endpoint. Address = “binary”, Binding = CustomBinding consisting of HttpTransportBindingElement and BinaryMessageEncodingBindingElement. b) For AJAX Client: JSON REST endpoint. Address = “”, Binding = WebHttpBinding w/JSON as the format. c) For other clients : SOAP w/XML endpoint. Address = “soap”, Binding = BasicHttpBinding consisting of HttpTransportBindingElement and TextMessageEncodingBindingElement. The blog post here describes how one can consume the SOAP XML endpoint in a WindowsForms application. Here is a Sample of the same endpoint being consumed in a WPF application. Summary Above we discussed in some detail how RIA Services uses WCF under the covers. Hopefully this helps folks better understand the RIA Services alignment with WCF. In a future post I plan to discuss how WCF extensibility (e.g. custom behaviors) can be applied to Domain Services. In the meanwhile here is a Sample that demos this.
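To make the non-Silverlight story above a bit more tangible, here is a rough C# sketch of calling the default SOAP endpoint with a plain WCF ChannelFactory. The contract interface and the service URL below are illustrative placeholders only — in practice you would use the client and contract generated by Add Service Reference against the /soap address of the dynamically generated .svc.

using System;
using System.ServiceModel;

// Hypothetical contract shape; the real one is generated from the
// Domain Service (see the attached generated contract for actual names).
[ServiceContract]
public interface IOrganizationServiceSoap
{
    [OperationContract]
    string Echo(string msg);

    [OperationContract]
    DateTime GetServerDateTime();
}

class SoapClientSketch
{
    static void Main()
    {
        // BasicHttpBinding matches the default "soap" endpoint described above.
        var factory = new ChannelFactory<IOrganizationServiceSoap>(
            new BasicHttpBinding(),
            new EndpointAddress("http://localhost:1234/HRApp-Web-OrganizationService.svc/soap"));

        IOrganizationServiceSoap client = factory.CreateChannel();
        Console.WriteLine(client.Echo("hello"));
        Console.WriteLine(client.GetServerDateTime());

        ((IClientChannel)client).Close();
        factory.Close();
    }
}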
http://blogs.msdn.com/b/saurabh/archive/2009/11/23/understanding-the-wcf-in-wcf-ria-services.aspx
CC-MAIN-2015-32
en
refinedweb
I'm trying to create my first migration using Entity Framework 7 with code first development and I'm getting the following error: The property 'Email' cannot be added to the entity type 'UserDTO' because a navigation property with the same name already exists on entity type 'UserDTO'. My environment is: 1) Visual Studio 2015 2) Entity Framework v7.0.0-rc1-final 3) Code first development 4) Using the fluent API, not data annotations I can't figure out what the root cause of the issue is but I've got a few ideas. My UserDTO class is supposed to use its email address property as its PK, and it also has a collection of roles as a property. Below is the only class I have inheriting from DbContext right now. I've commented out the other entity I have to try and reduce the problem. class StorageContext : DbContext { public DbSet<UserDTO> Users { get; set; } protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder) { optionsBuilder.UseSqlServer("MySQLServerConnectionString"); } protected override void OnModelCreating(ModelBuilder modelBuilder) { modelBuilder.Entity<UserDTO>().HasKey(entity => entity.Email); //modelBuilder.Entity<UserDTO>().HasMany(entity => entity.Roles); } } And here is the UserDTO class. Can anyone see what's causing the error when I'm trying to make the migration? internal class UserDTO { public EmailAddress Email { get; private set; } public string FullName { get; private set; } public string UserName { get; private set; } public virtual ICollection<string> Roles { get; private set; } // more below here like a constructor and some basic methods If I switch the key for UserDTO to a plain string instead of the complex object EmailAddress it looks like it gets past that error and I get a different, equally interesting one: The property 'Roles' on entity type 'Microsoft.SRE.NavisionOnline.ConfigurationAutomation.DAL.SQLEntities.UserDTO' has not been added to the model or ignored. As far as I know you can't use a complex type as a PK. For the second error message: You can't use ICollection<string> Roles {..}, EF won't map this property to a table because you are using "string" as the type. You need to define a Role class and assign it a PK public class Role { public int Id {get; set;} public string RoleName {get; set;} } And then in your UserDTO: public ICollection<Role> Roles {...}
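Putting the answer's suggestions together, here is a minimal sketch of what the corrected model classes and configuration could look like. Class and property names follow the question; the exact mapping calls may differ slightly between EF Core releases, so treat this as a starting point rather than the definitive fix.

using System.Collections.Generic;
using Microsoft.EntityFrameworkCore;

public class Role
{
    public int Id { get; set; }
    public string RoleName { get; set; }
}

public class UserDTO
{
    // A plain string key instead of the complex EmailAddress type.
    public string Email { get; set; }
    public string FullName { get; set; }
    public string UserName { get; set; }
    public ICollection<Role> Roles { get; set; } = new List<Role>();
}

public class StorageContext : DbContext
{
    public DbSet<UserDTO> Users { get; set; }

    protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
    {
        optionsBuilder.UseSqlServer("MySQLServerConnectionString");
    }

    protected override void OnModelCreating(ModelBuilder modelBuilder)
    {
        // String key is fine; a complex type like EmailAddress is not.
        modelBuilder.Entity<UserDTO>().HasKey(entity => entity.Email);
        modelBuilder.Entity<UserDTO>().HasMany(entity => entity.Roles);
    }
}

With the model reshaped along these lines, recreating the migration should no longer trigger the navigation-property error described above.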
https://entityframeworkcore.com/knowledge-base/34935623/entity-framework-7-trouble-creating-migration-because-navigation-property-with-the-same-name-exists
CC-MAIN-2021-17
en
refinedweb
This project aims to replace the front desk cashier of food trucks and fast food joints with a voice-activated vending machine that can understand the order, check for allergies, take payment, and deliver orders through vending machines (soft drinks, water bottles) and through a window at the counter; it uses facial recognition to deliver each order to the correct person. It also remembers your allergies and your last order. Steps: 1. Speech Recognition 2. Face Recognition 3. NFC card reader 4. Everloop 5. GUI for front desk and kitchen 6. Controlling Motors 1. Speech Recognition: First, we need to make an account in snips.ai. Creating an app. - If you are logging into the console for the first time it will ask for the name of the assistant and the language. Give the name of your choice and select English. - After creating the assistant our first job is to create an app, you can also add apps made by others. - Go to add app and then create a new app, initially our app will not have any training data hence it will be shown as weak. Creating new intents: - In the next step, we are required to create new intents. Go to create new intent. Give it a name and short description. - Intents refer to the intention of the user. In our project, we have to recognize the intents like add items, remove items, add allergies, etc. - Each intent will contain some slots that can be number, time, names, etc, these will appear in the sentences of that intent and will be used in the intent callback codes that we will discuss further. - Start adding slots by clicking on add new slots. We can use slot types provided by default or add a custom slot. - Give a name to your slot type and define the values, for example, dish names. Make sure to define all the dishes that you want to recognize. - Add different slots and train your model by adding enough sentences and highlighting the respective slots (most of the time it will be done automatically). - Similarly, we need to add intents like: - addItems: when a person wants to add new items to the order. - removeItems: when a person wants to remove items. - response: when a person responds to a question in words like no, yes or continue. - allergies: when a person tells his allergies. - specialRequest: when a person wants to add special requests like less spicy, sweet, etc. - suggestion: When a person asks for suggestions while making an order, like a bestseller, today's special, etc. (Currently not implemented). - Once we have trained our model with enough examples, we are ready to go to the next step i.e. downloading it in the raspberry pi. Installing SAM CLI tool for offline speech recognition: Follow this tutorial for installing the SAM CLI tool on your raspberry pi, for working with the matrix creator. Installing the assistant to raspberry pi: - After installing SAM successfully, we need to download our assistant to raspberry pi for offline speech recognition (all sam commands will be run from PC terminal, not ssh) - Connect to the raspberry pi by sam connect <raspi ip address> - Log in to your snips account. sam login sam install assistant - It will fetch your assistant, if you have more than one it will ask you to select one. - After installing the assistant you can check the result by saying "Hey snips" and any sentence. You can check the snips output by this command. sam watch - You can check if every service is running fine by sam status At the time of writing this article the snips audio server and tts were having some issues, so I downgraded them.
sudo apt install snips-audio-server=0.63.3 sudo apt install snips-tts=0.63.3 Coding Intents: - It is better to start with a code template; you can download one from here. - Now we have to code functions for our intent callbacks. For example, when someone asks to add items to their order, snips will return a message with the intent name addItems and the slots that we have defined while training our intents, and each slot will contain the detected words that the user has said. - We will extract this information about items and their quantity from the slots, and if they are present in our menu we will give a confirmatory reply to the user. def addItems(self, intent_message): for name,values in intent_message.slots.items(): if name == "item": items = list(map(lambda x: str(x.value), values.all())) if name == "amount": amount = list(map(lambda x: int(x.value), values.all())) try: if len(items) == len(amount): add = {} add = dict(zip(items,amount)) dialogue = "" for dish,amount in add.items(): if dish in self.order.keys(): self.order[dish] = self.order[dish] + amount else: if dish in kiosk.menu.keys(): self.order[dish] = amount dialogue += str(amount) +" " + str(dish) dialogue += " is added to your order. " else: dialogue = " Sorry, please use numbers for quantity. " except: dialogue = " Sorry, I didn't get that. " self.state = 0; return dialogue - Similarly, we need to write code for all our callbacks; some syntaxes for sending dialogue to the tts are: hermes.publish_continue_session(intent_message.session_id,"Say this" ,\ ["intents of next dialogue"],"") hermes.publish_end_session(intent_message.session_id,"Say this") hermes.publish_start_session_notification("site.id","Say this","") 2. Face Recognition - First, make sure you have open cv4 installed; if not you can get some help here. - To confirm the installation run python in a shell, and import cv2. import cv2 >>> cv2.__version__ '4.0.0' - We will be doing three things in this script: first, we will detect a human face with a Haar Cascade classifier; if the detected face cannot be identified by the LBPH face recognizer using the current model, we will capture some images of the face to train our model, give it a unique id and save it to our database. - The face recognition script publishes detected face ids to the topic camera/recognisedIds and subscribes to the topic camera/addId. When a new user gives an order, the "main script" publishes a unique user id to camera/addId and the face recognition script trains its model to add this new id. - Here all functions are squeezed into one script; for a more detailed explanation please follow this project "Real-Time Face Recognition: An End-to-End Project". 3.
NFC Card Reader ''' Based on code by Marcelo Rovai - MJRoBot.org and on code by Anirban Kar: ''' def on_connect(client, userdata, flags, rc): print("[faceRecognition]: Connected") client.subscribe("camera/addId") def on_message(client, userdata, msg): global face_detected global face_add global face_id if str(msg.topic) == "camera/addId": face_id = str(msg.payload) face_add = 1 def initMqtt(): client.on_connect = on_connect client.on_message = on_message client.connect("localhost", 1883) client.loop_start() def getImagesAndLabels(path): imagePaths = [os.path.join(path,f) for f in os.listdir(path)] faceSamples=[] ids = [] for imagePath in imagePaths: PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale img_numpy = np.array(PIL_img,'uint8') id = int(os.path.split(imagePath)[-1].split(".")[1]) faces = detector.detectMultiScale(img_numpy) for (x,y,w,h) in faces: faceSamples.append(img_numpy[y:y+h,x:x+w]) ids.append(id) return faceSamples,ids def trainer(): path = 'dataset' recognizer = cv2.face.LBPHFaceRecognizer_create() print ("\n [FaceRecognition] Training faces. It will take a few seconds. Wait ...") faces,ids = getImagesAndLabels(path) recognizer.train(faces, np.array(ids)) # Save the model into trainer/trainer.yml recognizer.write('trainer/trainer.yml') def faceAddition(face_id): face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') count = 0 while(count<30): count += 1 ret, img = cam.read() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_detector.detectMultiScale(gray, 1.3, 5) #cv2.imshow('image', img) for (x,y,w,h) in faces: cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2) # Save the captured image into the datasets folder cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w]) print("[FaceRecognition]: Face capture count = " + str(count)) trainer() def faceDetection(): global cam #print('[FaceRecognition]: Face Detection ON') recognizer = cv2.face.LBPHFaceRecognizer_create() try: recognizer.read('trainer/trainer.yml') except: return [0] cascadePath = "haarcascade_frontalface_default.xml" faceCascade = cv2.CascadeClassifier(cascadePath); font = cv2.FONT_HERSHEY_SIMPLEX #iniciate id counter id = 0 # Define min window size to be recognized as a face minW = 0.1*cam.get(3) minH = 0.1*cam.get(4) counter = 0 recognised_ids = [] while counter<=20: counter+=1 ret, img =cam.read() < 100): pass else: id = 0 # storing list of recognised/unrecognised faces if not id in recognised_ids: recognised_ids.append(id) return recognised_ids - First, make sure you have NFC Reader Library for PN512 is installed if not follow this. - In our project payment is done by reading nfc tags, data.txt file contains the information regarding UID and their respective balance. - We read nfc data and extract UID in hex format and call function that opens data.txt file and searches for required UID and its balance if the UID has sufficient balance the file is updated with a new balance. - Payment also gets failed if user is unable to scan card within 60 seconds. int scan(double amount){ matrix_hal::NFC nfc; matrix_hal::NFCData nfc_data; std::cout << "[NFC]: NFC started!" 
<< std::endl; int sucess = 0; auto past_time = std::chrono::system_clock::now(); auto current_time = std::chrono::system_clock::now(); std::chrono::duration<double> duration = (current_time-past_time); while(duration.count()<60){ current_time = std::chrono::system_clock::now(); duration = current_time-past_time; nfc.Activate(); nfc.ReadInfo(&nfc_data.info); nfc.Deactivate(); if (nfc_data.info.recently_updated) { std::cout << "[NFC] : " + nfc_data.info.ToString() << std::endl; std::string user_id = nfc_data.info.UIDToHex(); sucess = payment(user_id, amount); break; } std::this_thread::sleep_for(std::chrono::microseconds(10000)); } return sucess; } <UID in HEX> <BALANCE> 4. Everloop - Before we continue, make sure you have MATRIX HAL installed, if not follow this link. - In everloop we have to set the RGB values of each led in the everloop_image object before finally writing it to the bus. for example: for (matrix_hal::LedValue &led : everloop_image.leds) { led.red = 0; // Set green to 100 led.green = 100; led.blue = 0; led.white = 0; } // Updates the Everloop on the MATRIX device everloop.Write(&everloop_image); - You also need to make sure we have paho mqtt c installed because we will be subscribing to the topic "everloop" so that we can change the colors of the everloop according to the situation. - The functions that are used for mqtt communication are written in the mqtthelper.cpp file and this should be added while compiling the code. #include <iostream> #include <string.h> #include "MQTTClient.h" #include "mqtthelper.h" volatile double msg = 0; MQTTClient_deliveryToken deliveredtoken; MQTTClient client; MQTTClient_message pubmsg = MQTTClient_message_initializer; MQTTClient_deliveryToken token; int msgarrvd(void *context, char *topicName, int topicLen, MQTTClient_message *message) { msg = std::stod((char*)message->payload); MQTTClient_freeMessage(&message); MQTTClient_free(topicName); return 1; } void delivered(void *context, MQTTClient_deliveryToken dt) { deliveredtoken = dt; } void connlost(void *context, char *cause) { printf("\n[Everloop]: Connection lost\n"); printf(" cause: %s\n", cause); } void initMqtt(char *ADDRESS,char *CLIENTID,char *TOPIC,int QOS){ MQTTClient_connectOptions conn_opts = MQTTClient_connectOptions_initializer; int rc; int ch; MQTTClient_create(&client, ADDRESS, CLIENTID,MQTTCLIENT_PERSISTENCE_NONE, NULL); conn_opts.keepAliveInterval = 20; conn_opts.cleansession = 1; MQTTClient_setCallbacks(client, NULL, connlost, msgarrvd, delivered); if ((rc = MQTTClient_connect(client, &conn_opts)) != MQTTCLIENT_SUCCESS) { printf("[Everloop]: Failed to connect, return code %d\n", rc); exit(EXIT_FAILURE); } std::cout<<"[Everloop]: Connected"<<std::endl; MQTTClient_subscribe(client, TOPIC, QOS); } void publishStatus(char *topic,char *payload){ int rc; pubmsg.payload = payload; pubmsg.payloadlen = 1; pubmsg.qos = 0; pubmsg.retained = 0; deliveredtoken = 0; MQTTClient_publishMessage(client,topic, &pubmsg, &token); printf("Waiting for publication of %s\n" "on topic %s \n", payload, topic); while(deliveredtoken != token); printf("[paymentny]Message with delivery\n"); } - Compile the file using the command g++ -o everloop everloop.cpp mqtthelper.cpp -std=c++11 -lmatrix_creator_hal -lpaho-mqtt3c - Run your everloop program and check that it is working by publishing to the topic everloop by running this python script. broker_address = "raspberrypi.local" port = 1883
user = "" client = mqttClient.Client("Python") #create new instance client.on_connect= on_connect #attach function to callback client.connect(broker_address, port=port) #connect to broker client.loop_start() #start the loop while Connected != True: #Wait for connection time.sleep(0.1) try: while True: value = raw_input() client.publish("everloop",value) except KeyboardInterrupt: client.disconnect() client.loop_stop() 5. GUI for front desk and kitchen - First, make sure you have PyQt installed; if not, run this command: sudo apt-get install python-qt4 - In the front GUI we display the menu and the current order; the script gets its information from the topic guiFront/text that is published by the "main script". - The GUI for the kitchen contains orders that are yet to be served. Once the order is ready and the "completed" button beneath the order is pressed, it publishes the order id to the topic guiBack/completedOrder which is subscribed to by the "main script", which then calls the user and delivers the order after recognizing them through facial recognition. 6. Controlling Motors - The ideal delivery mechanism will ensure that every order reaches its correct customer. The main problem arises when people don't reach the counter in the same order as they were called; catering to this kind of problem is out of the scope of this project. - Currently, motor.cpp will run two motors whenever the user called to collect an order is recognized by the camera. One motor delivers bottles from the vending machine and the other motor opens the window to collect the order from the counter.
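To tie the GUI topics above together, here is a rough Python sketch (not taken from the project repository) of how a front-desk display could subscribe to guiFront/text and how the kitchen GUI could publish a finished order id to guiBack/completedOrder with paho-mqtt; the order id 42 is purely illustrative.

import paho.mqtt.client as mqtt

def on_connect(client, userdata, flags, rc):
    print("[GUI]: Connected with result code " + str(rc))
    # Front-desk display listens for text pushed by the main script.
    client.subscribe("guiFront/text")

def on_message(client, userdata, msg):
    if msg.topic == "guiFront/text":
        print("[GUI]: Display update -> " + msg.payload.decode())

client = mqtt.Client("gui-demo")
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883)
client.loop_start()

# Kitchen side: mark a (hypothetical) order id 42 as completed.
client.publish("guiBack/completedOrder", "42")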
https://www.hackster.io/rishabh-verma/voice-activated-vending-machine-b8098b
CC-MAIN-2021-17
en
refinedweb
Getting a result from a parallel task in Java using CompletableFuture December 4, 2016 Leave a comment In this post we saw how to start several processes on different threads using the CompletableFuture class. The example concentrated on methods with no return value. We let CompletableFuture finish the tasks in parallel before continuing with another process. In this post we’ll see a usage of CompletableFuture for functions with a return value. We’ll reuse several elements we saw in the post that concentrated on the Future class. The demo objects We’ll simulate a number of long running calculation processes that all return an integer but are independent of each other.. In the referenced post above we used the runAsync method of CompletableFuture to start the processes. The runAsync method was specifically designed for void processes where we were not expecting any return value. The function to use for processes that do have a return value is called supplyAsync. SupplyAsync accepts an object that implements the Supplier of T interface. A Supplier is similar to a Callable which is used in conjunction of the Future interface. The type parameter will be of the return type, integer in our case. A Supplier implements a method called get where the actual code is implemented to return the end result. Here’s our supplier to return the calculation result: import java.util.function.Supplier; public class CalculationServiceSupplier implements Supplier<Integer> { private final CalculationService calculationService; private final int firstOperand; private final int secondOperand; public CalculationServiceSupplier(CalculationService calculationService, int firstOperand, int secondOperand) { this.calculationService = calculationService; this.firstOperand = firstOperand; this.secondOperand = secondOperand; } @Override public Integer get() { return calculationService.calculate(firstOperand, secondOperand); } } The demo code In the below example we build a completable future for each of the calculation tasks, i.e. there will be 4 in total. We use the supplyAsync function of CompletableFuture as we hinted at above. Then for each process we’ll see the whenComplete function in action. whenComplete is attached to a completable future and is executed when the future has completed. It accepts a consumer with two parameters: the result of the process and a throwable object. The result will be the result returned by the parallel action and the throwable, i.e. the exception is populated in case there’s an exception in the parallel process. 
Here’s a possible implementation: private void tryCompletableFutureTasks() { ExecutorService cachedThreadPool = Executors.newCachedThreadPool(); CalculationService adder = new AdditionService(); CalculationService subtractor = new SubtractionService(); CalculationService multiplier = new MultiplicationService(); CalculationService divider = new DivisionService(); int firstOperand = 10; int secondOperand = 5; CompletableFuture<Integer> additionTask = CompletableFuture.supplyAsync(new CalculationServiceSupplier(adder, firstOperand, secondOperand), cachedThreadPool); CompletableFuture<Integer> subtractionTask = CompletableFuture.supplyAsync(new CalculationServiceSupplier(subtractor, firstOperand, secondOperand), cachedThreadPool); CompletableFuture<Integer> multiplicationTask = CompletableFuture.supplyAsync(new CalculationServiceSupplier(multiplier, firstOperand, secondOperand), cachedThreadPool); CompletableFuture<Integer> divisionTask = CompletableFuture.supplyAsync(new CalculationServiceSupplier(divider, firstOperand, secondOperand), cachedThreadPool); List<CompletableFuture<Integer>> allTasks = new ArrayList<>(); allTasks.add(additionTask); allTasks.add(subtractionTask); allTasks.add(multiplicationTask); allTasks.add(divisionTask); for (CompletableFuture<Integer> task : allTasks) { task.whenComplete((result, exception) -> { if (exception == null) { System.out.println(result); } else { task.completeExceptionally(exception); System.out.println(exception.getMessage()); } }); } } Run the function and the calculation results will appear in the debug window one by one as they are returned by their respective parallel functions. An alternative solution is to wait for all tasks to complete using the CompletableFuture.allOf method we saw in the post referenced in the first sentence of this post. Then we ask each completable future to return their results using the get function: CompletableFuture.allOf(additionTask, subtractionTask, multiplicationTask, divisionTask).join(); allTasks.stream().forEach((task) -> { try { int result = task.get(); System.out.println(result); } catch (InterruptedException | ExecutionException ex) { System.err.println(ex.getMessage()); } }); View all posts related to Java here.
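As a further variation on the alternative above (this part is not from the original post), the blocking get() calls can be avoided altogether by composing the futures, so the combined result only becomes available once every task has finished:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class CombinedResults {

    // Combines a list of futures into a single future holding all results.
    public static CompletableFuture<List<Integer>> allResults(List<CompletableFuture<Integer>> tasks) {
        return CompletableFuture
                .allOf(tasks.toArray(new CompletableFuture[0]))
                // By the time thenApply runs, every task has completed,
                // so join() below returns immediately without blocking.
                .thenApply(ignored -> tasks.stream()
                        .map(CompletableFuture::join)
                        .collect(Collectors.toList()));
    }
}

With that helper, allResults(allTasks).thenAccept(System.out::println) would print the four calculation results together once they are all available, without tying up the calling thread.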
https://dotnetcodr.com/2016/12/04/getting-a-result-from-a-parallel-task-in-java-using-completablefuture/
CC-MAIN-2021-17
en
refinedweb
# A new Go API for Protocol Buffers 2 Mar 2020 Tags: protobuf, technical Summary: Announcing a major revision of the Go API for protocol buffers. OldURL: /a-new-go-api-for-protobuf Joe Tsai Damien Neil Herbie Ong ## Introduction We are pleased to announce the release of a major revision of the Go API for [protocol buffers](), Google's language-neutral data interchange format. ## Motivations for a new API The first protocol buffer bindings for Go were [announced by Rob Pike]() in March of 2010. Go 1 would not be released for another two years. In the decade since that first release, the package has grown and developed along with Go. Its users' requirements have grown too. Many people want to write programs that use reflection to examine protocol buffer messages. The [`reflect`]() package provides a view of Go types and values, but omits information from the protocol buffer type system. For example, we might want to write a function that traverses a log entry and clears any field annotated as containing sensitive data. The annotations are not part of the Go type system. Another common desire is to use data structures other than the ones generated by the protocol buffer compiler, such as a dynamic message type capable of representing messages whose type is not known at compile time. We also observed that a frequent source of problems was that the [`proto.Message`]() interface, which identifies values of generated message types, does very little to describe the behavior of those types. When users create types that implement that interface (often inadvertently by embedding a message in another struct) and pass values of those types to functions expecting a generated message value, programs crash or behave unpredictably. All three of these problems have a common cause, and a common solution: The `Message` interface should fully specify the behavior of a message, and functions operating on `Message` values should freely accept any type that correctly implements the interface. Since it is not possible to change the existing definition of the `Message` type while keeping the package API compatible, we decided that it was time to begin work on a new, incompatible major version of the protobuf module. Today, we're pleased to release that new module. We hope you like it. ## Reflection Reflection is the flagship feature of the new implementation. Similar to how the `reflect` package provides a view of Go types and values, the [`google.golang.org/protobuf/reflect/protoreflect`]() package provides a view of values according to the protocol buffer type system. A complete description of the `protoreflect` package would run too long for this post, but let's look at how we might write the log-scrubbing function we mentioned previously. First, we'll write a `.proto` file defining an extension of the [`google.protobuf.FieldOptions`]() type so we can annotate fields as containing sensitive information or not. syntax = "proto3"; import "google/protobuf/descriptor.proto"; package golang.example.policy; extend google.protobuf.FieldOptions { bool non_sensitive = 50000; } We can use this option to mark certain fields as non-sensitive. message MyMessage { string public_name = 1 [(golang.example.policy.non_sensitive) = true]; } Next, we will write a Go function which accepts an arbitrary message value and removes all the sensitive fields.
// Redact clears every sensitive field in pb. func Redact(pb proto.Message) { // ... } This function accepts a [`proto.Message`](), an interface type implemented by all generated message types. This type is an alias for one defined in the `protoreflect` package: type ProtoMessage interface{ ProtoReflect() Message } To avoid filling up the namespace of generated messages, the interface contains only a single method returning a [`protoreflect.Message`](), which provides access to the message contents. (Why an alias? Because `protoreflect.Message` has a corresponding method returning the original `proto.Message`, and we need to avoid an import cycle between the two packages.) The [`protoreflect.Message.Range`]() method calls a function for every populated field in a message. m := pb.ProtoReflect() m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { // ... return true }) The range function is called with a [`protoreflect.FieldDescriptor`]() describing the protocol buffer type of the field, and a [`protoreflect.Value`]() containing the field value. The [`protoreflect.FieldDescriptor.Options`]() method returns the field options as a `google.protobuf.FieldOptions` opts := fd.Options().(*descriptorpb.FieldOptions) (Why the type assertion? Since the generated `descriptorpb` package depends on `protoreflect`, the `protoreflect` package can't return the concrete options type without causing an import cycle.) We can then check the options to see the value of our extension boolean: if proto.GetExtension(opts, policypb.E_NonSensitive).(bool) { return true // don't redact non-sensitive fields } Note that we are looking at the field _descriptor_ here, not the field _value_. The information we're interested in lies in the protocol buffer type system, not the Go one. This is also an example of an area where we have simplified the `proto` package API. The original [`proto.GetExtension`]() returned both a value and an error. The new [`proto.GetExtension`]() returns just a value, returning the default value for the field if it is not present. Extension decoding errors are reported at `Unmarshal` time. Once we have identified a field that needs redaction, clearing it is simple: m.Clear(fd) Putting all the above together, our complete redaction function is: // Redact clears every sensitive field in pb. func Redact(pb proto.Message) { m := pb.ProtoReflect() m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { opts := fd.Options().(*descriptorpb.FieldOptions) if proto.GetExtension(opts, policypb.E_NonSensitive).(bool) { return true } m.Clear(fd) return true }) } A more complete implementation might recursively descend into message-valued fields. We hope that this simple example gives a taste of protocol buffer reflection and its uses. ## Versions We call the original version of Go protocol buffers APIv1, and the new one APIv2. Because APIv2 is not backwards compatible with APIv1, we need to use different module paths for each. (These API versions are not the same as the versions of the protocol buffer language: `proto1`, `proto2`, and `proto3`. APIv1 and APIv2 are concrete implementations in Go that both support the `proto2` and `proto3` language versions.) The [`github.com/golang/protobuf`]() module is APIv1. The [`google.golang.org/protobuf`]() module is APIv2. We have taken advantage of the need to change the import path to switch to one that is not tied to a specific hosting provider. 
(We considered `google.golang.org/protobuf/v2`, to make it clear that this is the second major version of the API, but settled on the shorter path as being the better choice in the long term.) We know that not all users will move to a new major version of a package at the same rate. Some will switch quickly; others may remain on the old version indefinitely. Even within a single program, some parts may use one API while others use another. It is essential, therefore, that we continue to support programs that use APIv1. - `github.com/golang/[email protected]` is the most recent pre-APIv2 version of APIv1. - `github.com/golang/[email protected]` is a version of APIv1 implemented in terms of APIv2. The API is the same, but the underlying implementation is backed by the new one. This version contains functions to convert between the APIv1 and APIv2 `proto.Message` interfaces to ease the transition between the two. - `google.golang.org/[email protected]` is APIv2. This module depends upon `github.com/golang/[email protected]`, so any program which uses APIv2 will automatically pick a version of APIv1 which integrates with it. (Why start at version `v1.20.0`? To provide clarity. We do not anticipate APIv1 to ever reach `v1.20.0`, so the version number alone should be enough to unambiguously differentiate between APIv1 and APIv2.) We intend to maintain support for APIv1 indefinitely. This organization ensures that any given program will use only a single protocol buffer implementation, regardless of which API version it uses. It permits programs to adopt the new API gradually, or not at all, while still gaining the advantages of the new implementation. The principle of minimum version selection means that programs may remain on the old implementation until the maintainers choose to update to the new one (either directly, or by updating a dependency). ## Additional features of note The [`google.golang.org/protobuf/encoding/protojson`]() package converts protocol buffer messages to and from JSON using the [canonical JSON mapping](), and fixes a number of issues with the old `jsonpb` package that were difficult to change without causing problems for existing users. The [`google.golang.org/protobuf/types/dynamicpb`]() package provides an implementation of `proto.Message` for messages whose protocol buffer type is derived at runtime. The [`google.golang.org/protobuf/testing/protocmp`]() package provides functions to compare protocol buffer messages with the [`github.com/google/cmp`]() package. The [`google.golang.org/protobuf/compiler/protogen`]() package provides support for writing protocol compiler plugins. ## Conclusion The `google.golang.org/protobuf` module is a major overhaul of Go's support for protocol buffers, providing first-class support for reflection, custom message implementations, and a cleaned up API surface. We intend to maintain the previous API indefinitely as a wrapper of the new one, allowing users to adopt the new API incrementally at their own pace. Our goal in this update is to improve upon the benefits of the old API while addressing its shortcomings. As we completed each component of the new implementation, we put it into use within Google's codebase. This incremental rollout has given us confidence in both the usability of the new API and the performance and correctness of the new implementation. We believe it is production ready. We are excited about this release and hope that it will serve the Go ecosystem for the next ten years and beyond!
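To give one concrete taste of the protojson package mentioned above, here is a small self-contained sketch. It uses the well-known Timestamp type so that no generated code is needed; any other proto.Message would work the same way.

package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// timestamppb is just a convenient message type that ships with the module.
	ts := timestamppb.New(time.Now())

	// Marshal to the canonical JSON mapping.
	b, err := protojson.Marshal(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))

	// And back again.
	ts2 := &timestamppb.Timestamp{}
	if err := protojson.Unmarshal(b, ts2); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts2.AsTime())
}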
https://go.googlesource.com/blog/+/fd679df69f9cbbb8e8be61c4700dfbe8d0aed06d/content/protobuf-apiv2.article
CC-MAIN-2021-17
en
refinedweb
Public API Interface for Configuration. More... #include <launchdarkly/boolean.h> #include <launchdarkly/json.h> #include <launchdarkly/export.h> Go to the source code of this file. Public API Interface for Configuration. Add another mobile key to the list of secondary environments. Both name, and key must be unique. You may not add the existing primary environment (the one you used to initialize LDConfig). The name of the key can later be used in conjunction with LDClientGetForMobileKey. This function returns false on failure. Free an existing LDConfig instance. You will likely never use this routine as ownership is transferred to LDClient on initialization. Creates a new default configuration. mobileKey is required. The configuration object is intended to be modified until it is passed to LDClientInit, at which point it should no longer be modified. Set the path to the SSL certificate bundle used for peer authentication. This API is ineffective if LDConfigSetVerifyPeer is set to false. See CURLOPT_CAINFO for more information. Enables or disables real-time streaming flag updates. Default: true. When set to false, an efficient caching polling mechanism is used. We do not recommend disabling streaming unless you have been instructed to do so by LaunchDarkly support.
https://launchdarkly.github.io/c-client-sdk/config_8h.html
CC-MAIN-2021-17
en
refinedweb
In the words of the immortal Ken Wheeler: GraphQL is kind of like the s***. Actually, it's absolutely the s***. I tend to agree with this sentiment but that doesn't mean I think GraphQL is perfect. One of the most persistent challenges that has faced GraphQL since its introduction 5 years ago is client side caching. Does the GraphQL Specification Address Caching? The GraphQL specification aims to support a broad range of use cases. Caching has been considered out-of-scope for the spec itself since it wants to be as general as possible. Out of the roughly 30,000 words contained in the current working draft the word cache appears exactly once in section 3.5.5 on ID's: The ID scalar type represents a unique identifier, often used to refetch an object or as the key for a cache. In this article I'll try to answer a few high level questions around GraphQL caching including: - Why does GraphQL struggle with client side caching? - Why does this matter in GraphQL more so than REST? - What solutions do we currently have for this problem and what potential solutions are people working on? While the spec leaves caching to the imagination there is the next best thing to the spec, GraphQL.org. They have a page dedicated to explaining caching with GraphQL that I'll summarize after a quick primer on HTTP caching. HTTP Caching Before talking about strategies for GraphQL caching, it's useful to understand HTTP caching. Freshness and validation are different ways of thinking about how to control client and gateway caches. Client side and Gateway caches - Client side caches (browser caches) use HTTP caching to avoid refetching data that is still fresh - Gateway caches are deployed along with a server to check if the information is still up to date in the cache to avoid extra requests Freshness and Validation - Freshness lets the server transmit the time a resource should be considered fresh (through Cache-Controland Expiresheaders) and works well for data that doesn’t change often - Validation is a way for clients to avoid refetching data when they’re not sure if the data is still fresh or not (through Last-Modifiedand Etags) GraphQL Caching Clients can use HTTP caching to easily avoid refetching resources in an endpoint-based API. The URL is a globally unique identifier. It can be leveraged by the client to build a cache by identifying when two resources are the same. Only the combination of those two parameters will run a particular procedure on the server. Previous responses to GET requests can be cached and future requests can be routed through the cache. A historical response can be returned if possible. Globally Unique IDs Since GraphQL lacks a URL-like primitive the API usually exposes a globally unique identifier for clients to use. One possible pattern for this is reserving a field ( id). { starship(id:"3003") { id name } droid(id:"2001") { id name friends { id name } } } The id field provides a globally unique key. This is simple if the backend uses a UUID. But a globally unique identifier will need to be provided by the GraphQL layer if it is not provided by the backend. In simple cases this involves appending the name of the type to the ID and using that as the identifier. Compatibility with existing APIs How will a client using the GraphQL API work with existing APIs? It will be tricky if our existing API accepts a type-specific id while our GraphQL API uses globally unique identifiers. 
The GraphQL API can expose the previous API in a separate field and GraphQL clients can rely on a consistent mechanism for getting a globally unique identifier. Alternatives The client needs to derive a globally unique identifier for their caching. Having the server derive that id simplifies the client but the client can also derive the identifier. This can require combining the type of the object (queried with __typename) with some type-unique identifier. Dhaivat Pandya wrote and spoke extensively back in 2016 about how Apollo was tackling caching. We'll talk more about Apollo's cache later, but here is a high level summary of Dhaivat Pandya's thoughts. Query result trees represent a way to get trees out of your app data graph. Apollo Client applies two assumptions to cache query result trees. - Same path, same object — Same query path usually leads to the same piece of information - Object identifiers when the path isn't enough — Two results given for the same object identifier represent the same node/piece of information Apollo Client will update the query with a new result if any cache node involved in a query result tree is updated. Apollo Client Apollo Client stores the results of its GraphQL queries in a normalized, in-memory cache for responding sparingly to future queries for the same data. Normalization constructs a partial copy of your data graph on your client. The format is optimized for reading and updating the graph as your application changes state. You can configure the cache's behavior for other use cases: - Specify custom primary key fields - Customize the storage and retrieval of individual fields - Customize the interpretation of field arguments - Define supertype-subtype relationships for fragment matching - Define patterns for pagination - Manage client-side local state InMemoryCache import { InMemoryCache, ApolloClient } from '@apollo/client' const client = new ApolloClient({ cache: new InMemoryCache(options) }) Data normalization InMemoryCache has an internal data store for normalizing query response objects before the objects are saved: - Cache generates a unique ID for every identifiable object in the response - Cache stores objects by ID in a flat lookup table - Whenever an incoming object is stored with a duplicate ID the fields of those objects are merged - If incoming and existing object share fields, cached values for those fields are overwritten by incoming object - Fields in only existing or only incoming object are preserved InMemoryCache can exclude normalization for objects of a certain type for metrics and other transient data that's identified by a timestamp and never receives updates. Objects that are not normalized are embedded within their parent object in the cache. These objects can be accessed via their parent but not directly. readQuery readQuery enables you to run a GraphQL query directly on your cache. If the cache contains all necessary data it returns a data object in the shape of the query, otherwise it throws an error. It will never attempt to fetch data from a remote server. Pass readQuery a GraphQL query string const { todo } = client.readQuery({ query: gql` query ReadTodo { todo(id: 5) { id text completed } } `, }) Provide GraphQL variables to readQuery const { todo } = client.readQuery({ query: gql` query ReadTodo($id: Int!) { todo(id: $id) { id text completed } } `, variables: { id: 5, }, }) readFragment readFragment enables you to read data from any normalized cache object that was stored as part of any query result. 
Calls do not need to conform to the structure of one of your data graph's supported queries like with readQuery. Fetch a particular item from a to-do list const todo = client.readFragment({ id: 'Todo:5', fragment: gql` fragment MyTodo on Todo { id text completed } `, }) writeQuery, writeFragment You can also write arbitrary data to the cache with writeQuery and writeFragment. All subscribers to the cache (including all active queries) see this change and update the UI accordingly. Same signature as their read counterparts, except with an additional data variable client.writeFragment({ id: '5', fragment: gql` fragment MyTodo on Todo { completed } `, data: { completed: true, }, }) Combining reads and writes readQuery and writeQuery can be combined to fetch currently cached data and make selective modifications. Create a new Todo item that is cached without sending it to the remote server. const query = gql` query ReadTodos { todos { id text completed } } ` const data = client.readQuery({ query }) const myNewTodo = { id: '6', text: 'Start using Apollo Client.', completed: false, __typename: 'Todo', } client.writeQuery({ query, data: { todos: [...data.todos, myNewTodo], }, }) cache.modify cache.modify of InMemoryCache enables you to directly modify the values of individual cached fields, or even delete fields entirely. This is an escape hatch you want to avoid. Although, as we'll see at the end of the article, some people think we should only have an escape hatch.
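To show what that escape hatch looks like in practice, here is a tiny illustrative sketch; the Todo:5 id and the field names are made up for the example, and client is the same ApolloClient instance used earlier.

// Directly toggle one cached field on a normalized Todo object.
client.cache.modify({
  id: 'Todo:5',
  fields: {
    completed(previousValue) {
      return !previousValue
    },
  },
})

// Or evict the whole object from the cache entirely.
client.cache.evict({ id: 'Todo:5' })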
This keeps the library small and omits the GraphQL queries from your bundle. Section and Distributed GraphQL I know nothing about this and this article's length is already out of control but I found one nascent approach that seems worth mentioning. A company called Section is trying to build a distributed GraphQL solution. It is fully configurable to address caching challenges without having to maintain a distributed system as the distributed system would be managed by them. They say that it's simultaneously similar to Apollo Federation but also solving a problem Apollo Federation doesn't solve, so I'm curious how exactly that works. On first look it seems like they are taking the approach of micro-graphql-react and giving more cache control back to the developers. Persistent Queries One more thing getting thrown around in this conversation that I'll need an addition article to cover is persistent queries. The idea is to send a query id or hash instead of an entire GraphQL query string. This reduces bandwidth utilization and speeds up loading times for end-users. Resources Caching GraphQL - Mark Nottingham - Caching Tutorial for Web Authors and Webmasters - GraphQL.org - Caching - Sam Silver - GraphQL Client-Side Caching - Scott Walkinshaw - Caching GraphQL APIs - Tanmai Gopal - An approach to automated caching for public & private GraphQL APIs Apollo - Dhaivat Pandya - GraphQL Concepts Visualized - Marc-André Giroux - GraphQL & Caching: The Elephant in the Room - Blessing Krofegha - Understanding Client-Side GraphQl With Apollo-Client In React Apps - John Haykto - GraphQL Client-Side Caching with Apollo Links - Marc-André Giroux - Caching & GraphQL: Setting the Story Straight - Ben Newman - Fine Tuning Apollo Client Caching for Your Data Graph - Khalil Stemmler - Using Apollo Client 3 as a State Management Solution urql - Kurt Kemple - Intro to Urql - Ben Awad - Urql - a new GraphQL Client - Ken Wheeler - Introduction to urql - A new GraphQL Client for React - Gerard Sans - Comparing Apollo vs Urql - Phil Pluckthun, Jovi De Croock - Client-Side GraphQL Using URQL - Ryan Gilbert - Taking Flight with URQL micro-graphql-react - Adam Rackis - A Different Approach to GraphQL Caching - Adam Rackis - An Alternate Approach to GraphQL Caching Discussion (2) In case you don't just want to cache in the client, but also in a CDN, you may wanna check out graphcdn.io Ooooo, I'm all about that CDN life, so if someone stuck GraphQL on a CDN I'm definitely game for that. Added myself to the waiting list.
https://practicaldev-herokuapp-com.global.ssl.fastly.net/ajcwebdev/graphql-caching-42ep
CC-MAIN-2021-17
en
refinedweb
Important You need to become part of the openEO Platform "early adopter" program (opens new window) to access the processing infrastructure. This Getting Started guide will simply give you an overview of the capabilities of the openEO Python client library. More in-depth information and documentation can be found on the official documentation (opens new window) website. The High level Interface (opens new window) of the Python client is designed to provide an opinionated, Pythonic API to interact with openEO Platform. # Installation The openEO Python client library can easily be installed with a tool like pip, for example: pip install openeo It's recommended to work in a virtual environment of some kind ( venv, conda, ...), containing Python 3.6 or higher. TIP For more details, alternative installation procedures or troubleshooting tips: see the official openeo package installation documentation (opens new window). # Connect to openEO Platform and explore First, establish a connection to the openEO Platform back-end: import openeo connection = openeo.connect("openeo.cloud") The Connection object (opens new window) is your central gateway to - list data collections, available processes, file formats and other capabilities of the back-end - start building your openEO algorithm from the desired data on the back-end - execute and monitor (batch) jobs on the back-end - etc. # Collections The Earth observation data (the input of your openEO jobs) is organised in so-called collections, e.g. fundamental satellite collections like "Sentinel 1" or "Sentinel 2", or preprocessed collections like "NDVI". Note More information on how openEO "collections" relate to terminology used in other systems can be found in the openEO glossary (opens new window). While it's recommended to browse the available EO collections on the openEO Platform collections overview webpage, it's possible to list and inspect them programmatically. As a very simple usage example of the openEO Python client, let's use the list_collection_ids (opens new window) and describe_collection (opens new window) methods on the connection object we just created: >>> # Get all collection ids >>> print(connection.list_collection_ids()) ['AGERA5', 'SENTINEL1_GRD', 'SENTINEL2_L2A', ... >>> # Get metadata of a single collection >>> print(connection.describe_collection("SENTINEL2_L2A")) {'id': 'SENTINEL2_L2A', 'title': 'Sentinel-2 top of canopy ...', 'stac_version': '0.9.0', ... TIP The openEO Python client library comes with Jupyter (notebook) integration in a couple of places. For example, put connection.describe_collection("SENTINEL2_L2A") (without print()) as last statement in a notebook cell and you'll get a nice graphical rendering of the collection metadata. TIP Find out more about data discovery, loading and filtering in the official openEO Python client documentation (opens new window). # Processes The processing functionality of a back-end is exposed through pre-defined processes, which can be combined into a so-called "process graph" that implements a certain algorithm. Note Check the openEO glossary (opens new window) for more details on pre-defined, user-defined processes and process graphs. Let's list the available pre-defined processes with list_processes (opens new window): >>> print(connection.list_processes()) [{'id': 'absolute', 'summary': 'Absolute value', 'description': 'Computes the absolute value of ... {'id': 'mean', 'summary': 'Arithmetic mean(average)', ... ... Like with collections, instead of programmatic exploration you'll probably prefer a more graphical, interactive interface.
Use the Jupyter notebook integration (put connection.list_processes() without print() as the last statement in a notebook cell) or visit a web-based overview of the available processes on openEO Platform. TIP Find out more about process discovery and usage in the official openEO Python client documentation. # Authentication In the code snippets above we did not need to log in as a user. The openEO Python client library tries to make authentication as streamlined as possible. In most cases, for example, the following snippet is enough to obtain an authenticated connection: import openeo connection = openeo.connect("openeo.cloud").authenticate_oidc() This statement will automatically reuse a previously authenticated session, when available. Otherwise, e.g. the first time you do this, some user interaction is required and it will print a web link and a short user code. Visit this web page in a browser, log in there with an existing account and enter the user code. If everything goes well, the connection object in the script will be authenticated and the back-end will be able to identify you in subsequent requests. More detailed information on authentication can be found in the openEO Python client documentation. # Working with Datacubes Now that we know how to discover the capabilities of the back-end and how to authenticate, let's do some real work and process some EO data in a batch job. We'll first build the desired algorithm by working on so-called "datacubes", the central concept in openEO to represent EO data, as discussed in great detail here. # Creating a Datacube The first step is loading the desired slice of a data collection with Connection.load_collection: datacube = connection.load_collection( "SENTINEL1_GRD", spatial_extent={"west": 16.06, "south": 48.06, "east": 16.65, "north": 48.35}, temporal_extent=["2017-03-01", "2017-04-01"], bands=["VV", "VH"] ) This results in a DataCube object containing the "SENTINEL1_GRD" data restricted to the given spatial extent, temporal extent and bands. # Applying processes By applying an openEO process on a datacube, we create a new datacube object that represents the manipulated data. The openEO Python client allows you to do this by calling DataCube object methods. The most common or popular openEO processes have a dedicated DataCube method (e.g. mask, aggregate_spatial, filter_bbox, ...). There are also some convenience methods that implement more complex openEO process constructs in a compact, Pythonic interface. For example, the DataCube.min_time method implements a reduce_dimension process along the temporal dimension, using the min process as the reducer function: datacube = datacube.min_time() This creates a new datacube (we overwrite the existing variable), where the time dimension is eliminated and for each pixel we just have the minimum value of the corresponding timeseries in the original datacube. See the Python client's DataCube API documentation for a more complete listing of methods that implement openEO processes.
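To see these pieces in one place, the snippets so far combine into the following minimal script. This is a sketch using the illustrative collection id, extent and bands from above, and it assumes you have openEO Platform access and go through the authentication flow described earlier:

import openeo

# Connect to the openEO Platform back-end and authenticate
# (a previously authenticated session is reused when available)
connection = openeo.connect("openeo.cloud").authenticate_oidc()

# Load a slice of a collection as a datacube: restrict by bounding box, time window and bands
datacube = connection.load_collection(
    "SENTINEL1_GRD",
    spatial_extent={"west": 16.06, "south": 48.06, "east": 16.65, "north": 48.35},
    temporal_extent=["2017-03-01", "2017-04-01"],
    bands=["VV", "VH"],
)

# Apply a process: eliminate the time dimension, keeping the per-pixel minimum
datacube = datacube.min_time()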
openEO processes that are not supported by a dedicated DataCube method can be applied in a generic way with the process method, e.g.: datacube = datacube.process( process_id="ndvi", arguments={ "data": datacube, "nir": "B8", "red": "B4"} ) This applies the ndvi process to the datacube with the arguments "data", "nir" and "red" (this example assumes a datacube with bands B8 and B4). Note Still unsure on how to make use of processes with the Python client? Visit the official documentation on working with processes. # Defining output format After applying all the processes we want to execute, we need to tell the back-end to export the datacube, for example as GeoTIFF: result = datacube.save_result("GTiff") # Execution It's important to note that all the datacube processes we applied up to this point are not actually executed yet, neither locally nor remotely on the back-end. We just built an abstract representation of the algorithm (input data and processing chain), encapsulated in a local DataCube object (e.g. the result variable above). To trigger actual execution on the back-end we have to explicitly send this representation to the back-end. openEO defines several processing modes, but for this introduction we'll focus on batch jobs, which is a good default choice. # Batch job execution The result datacube object we built above describes the desired input collections, processing steps and output format. We can now just send this description to the back-end to create a batch job with the create_job method like this: # Creating a new job at the back-end by sending the datacube information. job = result.create_job() The batch job, which is referenced by the returned job object, is only created at the back-end; it is not started yet. To start the job and let your Python script wait until the job has finished and then download the results automatically, you can use the start_and_wait method. # Start the job and wait until it has finished, then download the result. job.start_and_wait() job.get_results().download_files("output") When everything completes successfully, the processing result will be downloaded as a GeoTIFF file in a folder "output". TIP The official openEO Python Client documentation has more information on batch job management and downloading results. # Additional Information Additional information and resources about the openEO Python Client Library:
https://docs.openeo.cloud/getting-started/python/
CC-MAIN-2022-40
en
refinedweb
fork1(2) [opensolaris man page] fork(2) System Calls fork(2) NAME fork, fork1, forkall, forkx, forkallx - create a new process SYNOPSIS #include <sys/types.h> #include <unistd.h> pid_t fork(void); pid_t fork1(void); pid_t forkall(void); #include <sys/fork.h> pid_t forkx(int flags); pid_t forkallx(int flags); DESCRIPTION: descriptors and directory streams. Each of the child's file descriptors shares a common file pointer with the corresponding file descriptorlimit(2). The it_value and it_interval values for the ITIMER_REAL timer are reset to 0; see getitimer(2). o The set of signals pending for the child process is initialized to the empty set. o Timers created by timer_create(3C) are not inherited by the child process. o No asynchronous input or asynchronous output operations are inherited by the child. o Any preferred hardware address tranlsation sizes (see memcntl(2)) are inherited by the child. o. Threads(). Fork Extensions The forkx() and forkallx() functions accept a flags argument consisting of a bitwise inclusive-OR of zero or more of the following flags, which are defined in the header <sys/fork.h>: FORK_NOSIGCHLD Do not post a SIGCHLD signal to the parent process when the child process terminates, regardless of the disposition of the SIGCHLD sig- nal in the parent. SIGCHLD signals are still possible for job control stop and continue actions if the parent has requested them. FORK_WAITPID(). fork() Safety inter- faces." RETURN VALUES. ERRORS The fork(), fork1(), forkall(), forkx(), and forkallx() functions. The forkx() and forkallx() functions will fail if: EINVAL The flags argument is invalid. ATTRIBUTES See attributes(5) for descriptions of the following attributes: +-----------------------------+-----------------------------+ | ATTRIBUTE TYPE | ATTRIBUTE VALUE | +-----------------------------+-----------------------------+ |Interface Stability |Committed | +-----------------------------+-----------------------------+ |MT-Level |Async-Signal-Safe. | +-----------------------------+-----------------------------+ |Standard |See below. | +-----------------------------+-----------------------------+ For fork(), see standards(5). SEE) NOTES. SunOS 5.11 28 Oct 2008 fork(2)
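As an aside (not part of the original manual page): the core semantics described above, where the child receives a copy of the parent's address space and fork() returns 0 in the child but the child's process ID in the parent, are easy to observe from Python, whose os.fork() is a thin wrapper over this system call on Unix-like systems. A minimal sketch:

import os
import sys

pid = os.fork()  # returns 0 in the child, the child's PID in the parent

if pid == 0:
    # Child: runs with its own copy of the parent's data
    print(f"child pid={os.getpid()}, parent pid={os.getppid()}")
    sys.exit(0)
else:
    # Parent: wait for the child and report its exit status
    reaped_pid, status = os.waitpid(pid, 0)
    print(f"parent reaped child {reaped_pid}, exit status {os.WEXITSTATUS(status)}")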
https://www.unix.com/man-page/opensolaris/2/fork1/
CC-MAIN-2022-40
en
refinedweb
NAME CURLINFO_PRETRANSFER_TIME_T - get the time until the file transfer start SYNOPSIS #include <curl/curl.h> CURLcode curl_easy_getinfo(CURL *handle, CURLINFO_PRETRANSFER_TIME_T, curl_off_t *timep); DESCRIPTION Pass a pointer to a curl_off_t to receive the time, in microseconds, it took from the start until the file transfer is just about to begin. This includes all pre-transfer commands and negotiations that are specific to the particular protocol(s) involved. It does not involve the sending of the protocol-specific request that triggers a transfer. When a redirect is followed, the time from each request is added together. See also the TIMES overview in the curl_easy_getinfo(3) man page. PROTOCOLS All EXAMPLE curl = curl_easy_init(); if(curl) { curl_off_t pretransfer; curl_easy_setopt(curl, CURLOPT_URL, url); res = curl_easy_perform(curl); if(CURLE_OK == res) { res = curl_easy_getinfo(curl, CURLINFO_PRETRANSFER_TIME_T, &pretransfer); if(CURLE_OK == res) { printf("Time: %" CURL_FORMAT_CURL_OFF_T ".%06ld", pretransfer / 1000000, (long)(pretransfer % 1000000)); } } /* always cleanup */ curl_easy_cleanup(curl); } AVAILABILITY Added in 7.61.0 RETURN VALUE Returns CURLE_OK if the option is supported, and CURLE_UNKNOWN_OPTION if not. SEE ALSO curl_easy_getinfo(3), curl_easy_setopt(3), CURLINFO_PRETRANSFER_TIME(3)
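As a side note for Python users (not part of the manual page): pycurl exposes the classic seconds-based counterpart of this info as pycurl.PRETRANSFER_TIME, a float in seconds. Whether a given pycurl build also exposes the microsecond *_T variant depends on its version, so this sketch sticks to the widely available option; the URL is just a placeholder:

import pycurl
from io import BytesIO

body = BytesIO()
curl = pycurl.Curl()
curl.setopt(pycurl.URL, "https://example.com")
curl.setopt(pycurl.WRITEDATA, body)  # collect the response body in memory
curl.perform()

# Time from the start until the file transfer was just about to begin, in seconds
pretransfer = curl.getinfo(pycurl.PRETRANSFER_TIME)
print("Pre-transfer time: %.6f s" % pretransfer)
curl.close()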
https://manpages.debian.org/testing/libcurl4-doc/CURLINFO_PRETRANSFER_TIME_T.3.en.html
CC-MAIN-2022-40
en
refinedweb
DotVVM and ASP.NET Core: Implementing CRUD operations DotVVM is a ASP.NET framework that allows us to create web applications using the MVVM (Model-View-Viewmodel) design pattern with C# and HTML. In this tutorial we will learn how to create CRUD operations (Create, Read, Update, and Delete) using the DotVVM and .NET Core framework. Want to know what are the steps to create a DotVVM app? To learn about this you can review this article: Steps to create an MVVM application (Model-View-Viewmodel) with DotVVM and ASP.NET Core For our case study, we will take as an example entity the information of a student for the realization of CRUD operations and divide the application into three parts: - Data Access Layer Implementation: to manage connection and database access. - Implementation of the BL (Business Layer): for the management of services and logic of the application domain. - Implementation of the application presentation layer. This section is where DotVVM comes into action. Part 1: Data Access Layer — DAL As the first point to create our application, in the Data Access Layer we must define the entities that the application domain will have and the DBContext at which the reference for the connection to our database will be defined. In the application we will use the entity: Student. If we had other entities, for example: Professor, Subject, etc; these will be located in the Entities folder. For our case study, the Student entity will be defined as follows: public class Student { public int Id { get; set; } public string FirstName { get; set; } public string LastName { get; set; } public string About { get; set; } public DateTime EnrollmentDate { get; set; } } On the other hand, we have the DBContext, the primary class with which our application will interact (Operations in business logic) and that will allow communication with the database: public class StudentDbContext : DbContext { public StudentDbContext(DbContextOptions options) : base(options) { Database.EnsureCreated(); } public DbSet<Student> Students { get; set; } } In the StudentDbContext there are two important things to say: context.Database.EnsureCreated()is a primary Entity Framework - EF method that ensures that the database exists for the context. If it exists, no action is taken. If it does not exist, the database and all its schemas are created and are also guaranteed to support the model for this context. DbSet<Student> Students {get; set;}; represents a collection for the Studententity within the data model and is the gateway to database operations with this entity. Typically this section of the DAL (entities such as Student and associations in the DbContext) is automatically generated based on entities already defined in the database through Entity Framework. Part 2: Business Layer — BL Now we need to define the models and create the services to control the logic of our application. In this case, what we are looking for is to have a general list of students and the specific information of each of them. Models As a first point we will define our models: StudentListModel: we implement the services of our application. Services In this case we have the Student service that will allow us to carry out CRUD operations. public class StudentService { private readonly StudentDbContext studentDbContext; public StudentService(StudentDbContext studentDbContext) { this.studentDbContext = studentDbContext; } public async Task<List<StudentListModel>> GetAllStudentsAsync()... 
public async Task<StudentDetailModel> GetStudentByIdAsync(int studentId)... public async Task UpdateStudentAsync(StudentDetailModel student)... public async Task InsertStudentAsync(StudentDetailModel student)... public async Task DeleteStudentAsync(int studentId)... } For managing information stored in the database using LINQ — Language Integrated Query, a component of the Microsoft .NET platform that adds data query capabilities natively to the .NET languages. In other words, LINQ allows us to query collections of objects (the entities defined in the DAL) to handle information and perform operations on the database. To understand a little more about how LINQ works, in the following method we can see the procedure that allows us to obtain the information of a particular student according to their identification: public async Task<StudentDetailModel> GetStudentByIdAsync(int studentId) { return await studentDbContext.Students.Select( s => new StudentDetailModel { Id = s.Id, FirstName = s.FirstName, LastName = s.LastName, About = s.About, EnrollmentDate = s.EnrollmentDate }) .FirstOrDefaultAsync(s => s.Id == studentId); } For more detail, see how LINQ works, you can refer to the Microsoft documentation at: Part 3: Application Presentation Layer Now that we have defined the DAL and BL, we must carry out the design of the website so that the user can interact with it and in this case, perform CRUD operations for the management of Students. This is the part where DotVVM comes into action. Each page in DotVVM consists of two files: - A view, which is based on HTML syntax and describes how the page will look. - A view model, which is a class in C# that describes the state of the page (for example, values in the form fields) and controls user interactions (for example, button clicks). For us we will have four Views and four Models associated with these Views (Viewmodels): - will see something like this: Next, let’s analyze in more detail server to view will be fine in this case. Learn more about Binding Directions at: Finally in the Default Viewmodel>Students</h1> <dot:RouteLink </div> <dot:GridView <Columns> <dot:GridViewTextColumn <dot:GridViewTextColumn <dot:GridViewTemplateColumn> <dot:RouteLink </dot:GridViewTemplateColumn> <dot:GridViewTemplateColumn> <dot:RouteLink Text="{resource: Texts.Label View of Default, the layout of the page becomes inBox, ComboBox, file handling, among others, that allow us to maintain communication between the View and the sources of information defined in the Viewmodels. See more at: Continuing our analysis, in the GridView we have the columns, such as viewing detail or modifying the record of a student in particular based on their ID: <dot:RouteLink RouteName=”Edit” Param-Id=”{{value: Id}}” /> These paths and their corresponding parameters are to be defined in the file DotvvmStartup.cs in the ConfigureRoutes method as follows: config.RouteTable.Add("Edit", "edit/{Id}", "Views/Edit.dothtml"); To learn more about Routing on DotVVM you can go to: The Create, View Detail, and Modify pages follow the same logic for View and Viewmodel components. When you add some student records in the app and load the homepage with the list of them, we will have something like this: What’s next? With these steps we have analyzed the most important parts of an MVVM web application using DotVVM y.NET Core with the help of Visual Studio 2019 to implement a CRUD about students information. 
The code in this example can be found on the sample page that Visual Studio 2019 has for DotVVM when generating a new project of type: DotVVM with ASP.NET Core and selecting the option: Sample CRUD Page. You can also find the code in this repository on GitHub here. Thank you In the next articles we will continue to review the components that DotVVM has for the design of web pages and case studies that may be useful to us in the future. To be aware of these and other news, you can follow me on Twitter: twitter.com/esDanielGomez See you soon!
https://medium.com/dotvvm/dotvvm-and-asp-net-core-implementing-crud-operations-ecac3127ff6d?source=post_internal_links---------6----------------------------
CC-MAIN-2022-40
en
refinedweb
Is there a way to declare a variable as unused in PyCharm, or in Python in general, so you can explicitly tell the compiler not to give a warning about it? I am not talking about the convention of naming unused variables for the programmer (often named "_" or "__"), but an option to explicitly mark a variable as unused for the compiler, for example in a loop. I also don't just want to disable inspections in general. I've heard that you can do this in PyDev by beginning the variable name with "unused", and I thought this might exist in PyCharm as well, but couldn't find it yet. You can disable this inspection either for a single statement like: # noinspection PyUnusedLocal unused_thing = something() or for a whole function (or class) by placing the comment above the function (or class): # noinspection PyUnusedLocal def foo(): unused_thing = something() For some reason this particular inspection cannot be switched off via the inspections context menu… maybe worth a PyCharm ticket. You can easily and least intrusively ignore PyCharm unused-local warnings (only) for unused function parameters by prefixing them with underscores. E.g. in the following code, PyCharm will not warn about the unused parameter _bar: def foo(_bar): print("nothing here") I've noticed that using a single underscore for the throwaway variable name seems to bypass this check. I'm using PyCharm 2016.1.3. for _ in range(3): pass Another way, similar to UNUSED in C++ (here), which works if you want to hide the warning on a specific function parameter but keep the warning enabled for the rest of the function: # noinspection PyUnusedLocal def UNUSED(*args, **kwargs): pass def my_function(alpha, beta, gamma): UNUSED(gamma) return alpha + beta Just to extend Sebastian's answer: if you use a decorator with the function, you need to place the # noinspection PyUnusedLocal comment above the decorator; if you place it between the decorator and the function name, it will not work. # noinspection PyUnusedLocal @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ pass
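For completeness (this snippet is an editorial addition, not one of the original answers): the underscore convention mentioned in the question also applies to unpacking targets, where PyCharm and most linters treat _ as intentionally unused:

# Tuple unpacking: keep only the fields you care about
name, _, city = ("Ada", 42, "London")

# Extended unpacking: discard everything between the first and last element
first, *_, last = [1, 2, 3, 4, 5]

# Loop where only the iteration count matters
for _ in range(3):
    print("ping")

print(name, city, first, last)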
https://techstalking.com/programming/python/explicitly-declaring-variable-as-unused-in-python-pycharm/
CC-MAIN-2022-40
en
refinedweb
To be honest, the recent period has been hard for my creativity for various reasons. In spite of the ongoing pandemic, which is hard to get through with kids under the roof, I've started working on something that might be interesting for you. But it's a story to be released in 2 weeks 😉 Creating patterns This example may seem strange at first, but the fact is that it's been the actual production usage of scapy in my case. I needed to sniff the network, discover situations where a certain sequence of control bytes was sent, extract some information out of it and send it to an external server. In this example I'll create such a sequence, add some valid data, save it as a pcap file and then extract it with scapy. Let's begin by adding the sequence 0x01 0x02 0x03 0x04 0x05 to each packet's payload, in front of a byte of the actual data to send. import socket, sys HOST = 'localhost' PORT = 4321 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sender: sender.connect((HOST, PORT)) for char in sys.argv[1]: sender.sendall(b'\x01\x02\x03\x04\x05' + char.encode('utf-8')) Now we can start a netcat server: $ nc -l 127.0.0.1 4321 And listen to the network with Wireshark, filtering connections on loopback (localhost) and port 4321: We also need to send the actual data. Let's run the script prepared earlier: $ python send.py "I made it! 🤘" Scapy – pcap file analysis – code We need to modify the code a little bit. First of all, we need to sniff offline data saved in pcap files instead of live network communication. In order to achieve that, we need to pass the offline argument with the name of the file we want to analyze. In order to extract the desired packets, we need to pass a proper function reference as the lfilter argument, and another as prn to process the filtered packets. class Sniffer: def __init__(self): super().__init__() self._message = b'' # ... def run(self): sniff( offline='dump.pcapng', lfilter=self.my_filter, prn=self.display_filtered_packet ) Next, we need to filter the packets that contain the sequence defined earlier: def my_filter(self, packet: Packet) -> bool: return b'\x01\x02\x03\x04\x05' in raw(packet.payload) The last step is to clean up the data and display it on exit: class Sniffer: # ... def __exit__(self, exc_type, exc_val, exc_tb): print(self._message.decode('utf-8')) def display_filtered_packet(self, packet: Packet): raw_packet = raw(packet.payload) position = raw_packet.find(b'\x01\x02\x03\x04\x05') raw_packet = raw_packet[position:].replace(b'\x01\x02\x03\x04\x05', b'').replace(b'\r', b'') self._message += raw_packet The display_filtered_packet method builds the message, but does not show it immediately, since doing so would print it byte by byte, which is a problem for UTF-8 characters that are encoded as several bytes. Execute Having built our custom pcap file analyzer, let's run it against the prepared file. $ python analyze.py I made it! 🤘 As you can see, I really made it: the message was decoded correctly, including the UTF-8 characters I entered. (You'll find a consolidated version of the analyzer at the end of this post.) Learn more about scapy I strongly encourage you to visit the scapy project page. You'll learn more about making HTTP requests or fuzzing. If you have any interesting tool built with scapy, let me know in the comments so that we can share more knowledge. You'll find the full code example on my GitLab. See also You can also subscribe to the newsletter, and go through previous parts of the HackPy series:
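Before moving on, here is the analyzer assembled from the fragments above into a single runnable sketch. The class and method names follow the post; the __enter__ method and the with-statement wrapper are assumptions on my part to match the "display it on exit" behaviour:

from scapy.all import Packet, raw, sniff


class Sniffer:
    def __init__(self):
        self._message = b''

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Decode the collected bytes only once all packets have been processed
        print(self._message.decode('utf-8'))

    def my_filter(self, packet: Packet) -> bool:
        # Keep only packets whose payload contains the control sequence
        return b'\x01\x02\x03\x04\x05' in raw(packet.payload)

    def display_filtered_packet(self, packet: Packet):
        # Strip the control sequence (and stray carriage returns), then collect the data byte
        raw_packet = raw(packet.payload)
        position = raw_packet.find(b'\x01\x02\x03\x04\x05')
        raw_packet = raw_packet[position:].replace(b'\x01\x02\x03\x04\x05', b'').replace(b'\r', b'')
        self._message += raw_packet

    def run(self):
        sniff(
            offline='dump.pcapng',
            lfilter=self.my_filter,
            prn=self.display_filtered_packet,
        )


if __name__ == '__main__':
    with Sniffer() as sniffer:
        sniffer.run()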
https://blacksheephacks.pl/hackpy-part-4-pcap-files-analysis-with-scapy/
CC-MAIN-2022-40
en
refinedweb
When building out applications, sending email is a frequently needed feature. In addition, it's likely that the data sent in the email needs to be stored in a database for record keeping, analytics, or additional processing. AWS provides a range of services that help make setting up an API, database, and email transport quick, and secure. Specifically, AWS Amplify provides a lot of the functionality we'll need out of the box, and Amazon SES will send emails on our behalf. Overview of Services AWS Amplify is a suite of services ranging from UI components and a use case focused CLI to a CI/CD console and backend-scaffolding GUI. On the other hand, Amazon SES provides a scalable solution for sending email and is used at companies like Reddit and Netflix. In this post, we'll be using the Amplify CLI along with its JavaScript libraries to create a backend for a contact form, and use Amazon SES to send that information to our email. Getting Started If you don't already have an AWS account or have the Amplify CLI installed, follow this guide. 🚨 This project makes use of lambda environment variables. The ability to do this via the CLI was introduced in version 5.1.0. You may need to run npm install -g @aws-amplify/clito ensure you're on the latest version. Once setup, clone this contact-form-starter branch from this github url. After cloning the project, install the dependencies and run the project. Below are some helpful commands: // visit: git clone [email protected]:mtliendo/amplify-email-recipes.git cd amplify-email-recipes git checkout contact-form-starter npm install npm run dev Once the project has started, visit localhost:3000 and you should be presented with the following screen: Understanding Our Backend Using the above image as a reference, the Amplify services we'll need for our backend are in the following order: AppSync: Fully managed GraphQL API DynamoDB: NoSQL database Lambda: FaaS/cloud function In short, when a user fills out their contact form, that information will be stored in our database via our API. When that item is successfully saved, it will automatically trigger a function to send an email. Sounds like a lot. Let's see what we have to do to get this working. Initializing Our Backend 🚀 We'll start creating our backend by opening up a terminal and making sure we're in the root directory of our project. From here, we'll initialize Amplify by running the following command: amplify init We'll give our project a name and when prompted, select n to deny the default configuration. This is because we will be deploying our application as a static site. In NextJS, the name of that build directory is called out. In the terminal, accept all of the prompts, except when it comes to the Distribution Directory Path enter out. The entire flow should look like the below screenshot: Lastly, after selecting AWS profile we'll choose the profile we'd like to use. The flow should look similar to the following screenshot: Adding An API With our application ready to use the Amplify CLI, we'll create our backend. As mentioned earlier, we are using AWS AppSync, which is a managed GraphQL API. Traditionally, when sending email, a REST API is used. However, I've found that as needs change, AppSync provides more flexibility when it comes to handling authorization and a few other features. 
To add an API in Amplify, we'll simply type the following command in our project's terminal: amplify add api While in the CLI prompts, choose the following options: GraphQL [enter] to accept the default name API key "Contact form public API" [enter] to accept a default of 7 days [enter] to accept "No, I am done." [enter] to accept the default "N" option [enter] for a Single object with fields "y" to edit the schema now Select your editor of choice By selecting those options through the prompts, we told Amplify how we would like our API to be built. At this point, Amplify has opened up a file called schema.graphql with a sample Todo object. Replace everything in that file with the following: type Candidate @model @auth(rules: [{ allow: public, operations: [create] }]) { id: ID! name: String! email: AWSEmail! } To break down what's happening here, we are first creating a type called Candidate. In our application, a Candidate represents the user submitting their information. The @model text is called a directive. When Amplify sees this, it will automatically create a DynamoDB table and create CRUDL operations for the type it's associated with (in this case, Candidate). The @auth directive setups up authorization rules on our API. Here we are saying, "We want our API to be public to anyone with an API key, but we only want them to be able to create entries in out database, they can't read, update, or delete items. The next few lines are the fields associated with a Candidate. Here it's required that every Candidate has a unique id (automatically created with ID), a name, and an email -- AWS has a primitive called AWSEmail that automatically validates an email pattern. With that, our API and database are ready to be deployed. Before doing so, let's move on to our function. Setting Up Our Function Trigger AWS Lambda is an event-driven function. Meaning, it is called as a response to something. Often times, this is an endpoint like /pets, but in our application, we want this function to be called whenever an item is added to our database. Fortunately, Amplify takes care of this process by allowing us to set this up from the CLI. In our terminal, let's go through the following prompts: amplify add function Lambda function (serverless function) "contactformuploader" as the name of the function NodeJS Lambda Trigger Amazon DynamoDB Stream Use API category graphql @model backed DynamoDB table in the current Amplify project [enter] to not configure advanced settings [enter] to edit the local function now Choose your editor of choice This will open up the function in your editor. Before we remove the contents, let's chat about the generated code. When a change happens to a record in our database -- a change being either a INSERT, MODIFY, or REMOVE event, that information is sent as a stream of data to our lambda function. However, our database can undergo heavy traffic. So instead of firing our lambda for one change at a time, the changes can be sent in batches called shards. No need to get too technical, but this is why the generated code is iterating over event.Records. 
To drive the concept home, here's a diagram to showcase streaming and sharding: With that mini-lesson out of the way, let's replace the content in our lambda function, with the following: const aws = require('aws-sdk') const ses = new aws.SES() exports.handler = async (event) => { for (const streamedItem of event.Records) { if (streamedItem.eventName === 'INSERT') { //pull off items from stream const candidateName = streamedItem.dynamodb.NewImage.name.S const candidateEmail = streamedItem.dynamodb.NewImage.email.S await ses .sendEmail({ Destination: { ToAddresses: [process.env.SES_EMAIL], }, Source: process.env.SES_EMAIL, Message: { Subject: { Data: 'Candidate Submission' }, Body: { Text: { Data: `My name is ${candidateName}. You can reach me at ${candidateEmail}` }, }, }, }) .promise() } } return { status: 'done' } } This function will be automatically called when a candidate submits their information. The event will contain the related stream of data. So from here our job is simple: Grab the items from the stream, and send an email. Using the AWS SDK, we call the sendEmail function from the ses module. Wit that out of the way, we now have at least touched on all the pieces of our backend. We still however have a couple loose ends. Our function doesn't have permission to interact with SES We need to setup this process.env.SES_EMAILvariable We've yet to setup SES Our frontend code isn't setup to interact with our backend. Let's change gears for a bit and start with the third item and revisit the others. Setting Up SES As mentioned earlier, Amazon Simple Email Service (SES) provides a scalable way to send email. When first setting up SES, AWS will place you in sandbox mode. This means we'll have the following constraints: We can only send/receive to verified email addresses We can only send 1 email/sec Only 200 emails/day are allowed Fortunately for our application, this won't matter too much. To get started, let's hop into our terminal and run the following command: amplify console When prompted, select "Amplify console". 📝 you may be asked to log in to your AWS account Once logged in, search for "SES" in the top search bar of the console and hit enter. You should see a view similar to the one above. If not, you may need to click the top banner to be taken to this newer UI. From here, perform the following steps: Click the orange "Create identity" button Select the "Email address" option and enter your desired email Click the orange "Create identity" button That's it! Setting up an email for this service is well...simple 😅 There are two things we'll need before we hop back into our code. First, copy the ARN for your email by clicking the copy icon on the verified identities screen as show in the screenshot below: Store that in a notepad. We'll need it in a bit. Next, SES sent a confirmation email to the email address that was provided. Click the verification link and we're all set to head back to our code. Updating Our Lambda Recall that we need to both give our function permission to access SES, and add an environment variable to the function called SES_EMAIL. Let's first update the permissions. In your project directory we'll want to navigate to the following directory: amplify/backend/function/your-function-name/ Inside of this directory, you'll see the src directory for lambda, and a file titled your-function-name-cloudformation-template.json Select this file. 
No need to be intimidated, this JSON code is known as Cloudformation and is what Amplify has been creating for us when we were interacting with the CLI. It's full of settings and rules and we're about to add one more. lambdaexecutionpolicy (it should be right around line 132). This object has a Statement array that currently contains a single object. This object lets our function create logs in AWS. Add the following object to the Statement array and save: { "Action": ["ses:SendEmail"], "Effect": "Allow", "Resource": "the-arn-you-copied-from-ses" } This small addition gives our function the ability to call the sendEmail function using the email we verified. The lambdaexecutionpolicy object should look like the below screenshot (note I removed my email in place of a * for a bit more flexibility): The next step is to add the environment variable to our function. Back in the terminal, run the following command: amplify update function Enter the following options: Lambda function (serverless function) [Select your function name] Environment variables configuration type SES_EMAIL Enter the email that was verified with SES I'm done No -- I don't want to edit the local file now Push Up Our Backend We've done a lot by only running a few commands in the CLI. This templated our resources, but we have yet to push everything up to AWS. Let's fix that by running the following command in the terminal: amplify push This will provide a table of the primary resources we created (recall that our database is a secondary resource created by the @model directive). After selecting that you'd like to continue, select the following options: Yes -- generate code for the API 🔥 JavaScript [enter] to allow the default file path Yes -- generate all possible operations (recall we only allow createper our schema) [enter] to accept a max depth of 2 It'll take a few minutes for our terminal to finish up, but once that's done, our backend is complete 🎉 Let's wrap this up by giving our frontend the ability to talk to our backend. Configuring Amplify Libraries We'll start off by installing the AWS Amplify JavaScript package: npm i aws-amplify Once that is installed, we'll marry our frontend and backend together. In _app.js, add the following lines: import Amplify from '@aws-amplify/core' import config from '../src/aws-exports' Amplify.configure(config) Here we bring in the Amplify library, bring in our config (Amplify generated this and put it in .gitignore), and then we pass in our config to Amplify. Next up, in ContactForm.js, we'll also bring in the following imports: import { API } from 'aws-amplify' import { createCandidate } from '../src/graphql/mutations' 📝 Feel free to check out the createCandidatemutations file that Amplify generated for us. The API category is how we will talk to our AppSync API. Recall that this should not only store the contact in our database, but send an email to our verified address as well. The ContactForm.js file has the following lines of code: // TODO: Add code to send email here console.log('<send to backend here>') Replace the above with this snippet: await API.graphql({ query: createCandidate, variables: { input: { name, email, }, }, }) With that bit out of the way, we can now test our project! Restart your application on localhost:3000 and test it out. If all went well, after a few seconds you'll have an email in your inbox 🎉 📝 Because our emails are being sent via SES, they may show up in a spam folder or flagged by your email provider. This is because we haven't setup DKIM with SES. 
Though not terribly difficult, it's out of scope for this tutorial. However, if interested, you can read more about it here Hosting Our Project Having this run locally is great, but for a contact form, chances are we want it to be live on the internet. Fortunately, Amplify allows us to do this from the CLI as well. To get started, in our terminal, we'll run the following command: amplify add hosting From the prompts, we'll select the following options: Hosting with Amplify Console Manual Deployment Once selected, we can run the following command to view changes and upon accepting, our application will be deployed and a live on the web: amplify publish Copy and paste the generated URL in your terminal to browser to view. As you may have noticed in the CLI prompts, Amplify also supports git-based deployments. More information on setting that up can be found in the Amplify docs Recap Using Amplify takes care of a lot of the heavy-lifting when it comes to setting up AWS services so that we can focus on our actual business logic. It's also good to remember that Amplify allows us to own the code that we deploy by letting us modify the generated Cloudformation templates. Be sure to follow this series, or follow me on Twitter to get notified when the next iteration of this series comes out: Sending emails with attachments! 📧 Until then 🤖 Top comments (6) This is a terrific workflow process and amplify has really come far. I have been working on backend stuff with SAM and need to revisit frontend. At the step when the lambda function needs to be updated, I am not getting the same choices reflected in the writeup, i.e. " Environment variables configuration" using SAM CLI 1.25.0. My menu says, "1. resource access permissions, scheduled recurring invocation, lambda layers configuration" with the obvious choice being the first one. But this decision path doesn't continue your workflow. Welcome any tips. Thanks. Hey Vince! Glad you liked the post! Environment variables were just released last week. Have you had a chance to update your CLI to the latest version? npm i -g @aws-amplify/cli npm i -g @aws-amplify/cli Things move fast in the AWS universe! I think I upgraded just the previous week. That was the issue. Thanks again for the fast response and series. Kudos.
https://dev.to/mtliendo/serverless-contact-form-using-aws-amplify-1e9m
CC-MAIN-2022-40
en
refinedweb
difference_APIvariant. Transforming dependency artifacts on resolution As described in different kinds of configurations, there may be different variants for the same dependency. For example, an external Maven dependency has a variant which should be used when compiling against the dependency ( java-api), and a variant for running an application which uses the dependency ( java-runtime). A project dependency has even more variants, for example the classes of the project which are used for compilation are available as classes directories ( java-api-classes) or as JARs ( java-api-jars). The variants of a dependency may differ in its transitive dependencies or in the artifact itself. For example, the java-api and java-runtime variants of a Maven dependency only differ in the transitive dependencies and both use the same artifact - the JAR file. For a project dependency, the java-api-classes and the java-api-jars variants have the same transitive dependencies and different artifacts - the classes directories and the JAR files respectively. Gradle identifies a variant of a dependency uniquely by its set of attributes. The java-api variant of a dependency is the variant identified by the usage attribute with value java-api. When Gradle resolves a configuration, the attributes on the resolved configuration determine the requested attributes. For all dependencies in the configuration, the variant with the requested attributes is selected when resolving the configuration. For example, when the configuration requests usage=java-api-classes on a project dependency, then the classes directory is selected as the artifact. When the dependency does not have a variant with the requested attributes, resolving the configuration fails. Sometimes it is possible to transform the artifact of the dependency into the requested variant without changing the transitive dependencies. For example, unzipping a JAR transforms the artifact of the java-api-jars variant into the java-api-classes variant. Such a transformation is called Artifact Transform. Gradle allows registering artifact transforms, and when the dependency does not have the requested variant, then Gradle will try to find a chain of artifact transforms for creating the variant. Artifact transform selection and execution As described above, when Gradle resolves a configuration and a dependency in the configuration does not have a variant with the requested attributes, Gradle tries to find a chain of artifact transforms to create the variant. The process of finding a matching chain of artifact transforms is called artifact transform selection. Each registered transform converts from a set of attributes to a set of attributes. For example, the unzip transform can convert from usage=java-api-jars to usage=java-api-classes. In order to find a chain, Gradle starts with the requested attributes and then considers all transforms which modify some of the requested attributes as possible paths leading there. Going backwards, Gradle tries to obtain a path to some existing variant using transforms. For example, consider a minified attribute with two values: true and false. The minified attribute represents a variant of a dependency with unnecessary class files removed. There is an artifact transform registered, which can transform minified from false to true. When minified=true is requested for a dependency, and there are only variants with minified=false, then Gradle selects the registered minify transform. 
The minify transform is able to transform the artifact of the dependency with minified=false to the artifact with minified=true. Of all the found transform chains, Gradle tries to select the best one: If there is only one transform chain, it is selected. If there are two transform chains, and one is a suffix of the other one, it is selected. If there is a shortest transform chain, then it is selected. In all other cases, the selection fails and an error is reported. After selecting the required artifact transforms, Gradle resolves the variants of the dependencies which are necessary for the initial transform in the chain. As soon as Gradle finishes resolving the artifacts for the variant, either by downloading an external dependency or executing a task producing the artifact, Gradle starts transforming the artifacts of the variant with the selected chain of artifact transforms. Gradle executes the transform chains in parallel when possible. Picking up the minify example above, consider a configuration with two dependencies, the external guava dependency and a project dependency on the producer project. The configuration has the attributes usage=java-runtime-jars,minified=true. The external guava dependency has two variants: usage=java-runtime-jars,minified=falseand usage=java-api-jars,minified=false. Using the minify transform, Gradle can convert the variant usage=java-runtime-jars,minified=false of guava to usage=java-runtime-jars,minified=true, which are the requested attributes. The project dependency also has variants: usage=java-runtime-jars,minified=false, usage=java-runtime-classes,minified=false, usage=java-api-jars,minified=false, usage=java-api-classes,minified=false and a few more. Again, using the minify transform, Gradle can convert the variant usage=java-runtime-jars,minified=false of the project producer to usage=java-runtime-jars,minified=true, which are the requested attributes. When the configuration is resolved, Gradle needs to download the guava JAR and minify it. Gradle also needs to execute the producer:jar task to generate the JAR artifact of the project and then minify it. The downloading and the minification of the guava.jar happens in parallel to the execution of the producer:jar task and the minification of the resulting JAR. Here is how to setup the minified attribute so that the above works. You need to register the new attribute in the schema, add it to all JAR artifacts and request it on all resolvable configurations. 
def artifactType = Attribute.of('artifactType', String) def minified = Attribute.of('minified', Boolean) dependencies { attributesSchema { attribute(minified) // (1) } artifactTypes.getByName("jar") { attributes.attribute(minified, false) // (2) } } configurations.all { afterEvaluate { if (canBeResolved) { attributes.attribute(minified, true) // (3) } } } dependencies { registerTransform(Minify) { from.attribute(minified, false).attribute(artifactType, "jar") to.attribute(minified, true).attribute(artifactType, "jar") } } dependencies { // (4) implementation('com.google.guava:guava:27.1-jre') implementation(project(':producer')) } val artifactType = Attribute.of("artifactType", String::class.java) val minified = Attribute.of("minified", Boolean::class.javaObjectType) dependencies { attributesSchema { attribute(minified) // (1) } artifactTypes.getByName("jar") { attributes.attribute(minified, false) // (2) } } configurations.all { afterEvaluate { if (isCanBeResolved) { attributes.attribute(minified, true) // (3) } } } dependencies { registerTransform(Minify::class) { from.attribute(minified, false).attribute(artifactType, "jar") to.attribute(minified, true).attribute(artifactType, "jar") } } dependencies { // (4) implementation("com.google.guava:guava:27.1-jre") implementation(project(":producer")) } Add the attribute to the schema All JAR files are not minified Request minified=trueon all resolvable configurations Add the dependencies which will be transformed You can now see what happens when we run the resolveRuntimeClasspath task which resolves the runtimeClasspath configuration. Observe that Gradle transforms the project dependency before the resolveRuntimeClasspath task starts. Gradle transforms the binary dependencies when it executes the resolveRuntimeClasspath task. > gradle resolveRuntimeClasspath > Task :producer:compileJava > Task :producer:processResources NO-SOURCE > Task :producer:classes > Task :producer:jar > Transform artifact producer.jar (project :producer) with Minify Nothing to minify - using producer.jar unchanged > Task :resolveRuntimeClasspath Minifying guava-27.1-jre.jar Nothing to minify - using listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar unchanged Nothing to minify - using jsr305-3.0.2.jar unchanged Nothing to minify - using checker-qual-2.5.2.jar unchanged Nothing to minify - using error_prone_annotations-2.2.0.jar unchanged Nothing to minify - using j2objc-annotations-1.1.jar unchanged Nothing to minify - using animal-sniffer-annotations-1.17.jar unchanged Nothing to minify - using failureaccess-1.0.1.jar unchanged BUILD SUCCESSFUL in 0s 3 actionable tasks: 3 executed Implementing artifact transforms Similar to task types, an artifact transform consists of an action and some parameters. The major difference to custom task types is that the action and the parameters are implemented as two separate classes. The implementation of the artifact transform action is a class implementing TransformAction. You need to implement the transform() method on the action, which converts an input artifact into zero, one or multiple of output artifacts. Most artifact transforms will be one-to-one, so the transform method will transform the input artifact to exactly one output artifact. The implementation of the artifact transform action needs to register each output artifact by calling TransformOutputs.dir() or TransformOutputs.file(). 
You can only supply two types of paths to the dir or file methods: An absolute path to the input artifact or in the input artifact (for an input directory). A relative path. Gradle uses the absolute path as the location of the output artifact. For example, if the input artifact is an exploded WAR, then the transform action can call TransformOutputs.file() for all jar files in the WEB-INF/lib directory. The output of the transform would then be the library JARs of the web application. For a relative path, the dir() or file() method returns a workspace to the transform action. The implementation of the transform action needs to create the transformed artifact at the location of the provided workspace. The output artifacts replace the input artifact in the transformed variant in the order they were registered. For example, if the configuration consists of the artifacts lib1.jar, lib2.jar, lib3.jar, and the transform action registers a minified output artifact <artifact-name>-min.jar for the input artifact, then the transformed configuration consists of the artifacts lib1-min.jar, lib2-min.jar and lib3-min.jar. Here is the implementation of an Unzip transform which transforms a JAR file into a classes directory by unzipping it. The Unzip transform does not require any parameters. Note how the implementation uses @InputArtifact to inject the artifact to transform into the action. It requests a directory for the unzipped classes by using TransformOutputs.dir() and then unzips the JAR file into this directory. abstract class Unzip implements TransformAction<TransformParameters.None> { // (1) @InputArtifact // (2) abstract Provider<FileSystemLocation> getInputArtifact() @Override void transform(TransformOutputs outputs) { def input = inputArtifact.get().asFile def unzipDir = outputs.dir(input.name) // (3) unzipTo(input, unzipDir) // (4) } private static void unzipTo(File zipFile, File unzipDir) { // implementation... } } abstract class Unzip : TransformAction<TransformParameters.None> { // (1) @get:InputArtifact // (2) abstract val inputArtifact: Provider<FileSystemLocation> override fun transform(outputs: TransformOutputs) { val input = inputArtifact.get().asFile val unzipDir = outputs.dir(input.name) // (3) unzipTo(input, unzipDir) // (4) } private fun unzipTo(zipFile: File, unzipDir: File) { // implementation... } } Use TransformParameters.Noneif the transform does not use parameters Inject the input artifact Request an output location for the unzipped files Do the actual work of the transform An artifact transform may require parameters, like a String determining some filter, or some file collection which is used for supporting the transformation of the input artifact. In order to pass those parameters to the transform action, you need to define a new type with the desired parameters. The type needs to implement the marker interface TransformParameters. The parameters must be represented using managed properties and the parameters type must be a managed type. You can use an interface declaring the getters and Gradle will generate the implementation. All getters need to have proper input annotations, see the table in the section on incremental build. You can find out more about implementing artifact transform parameters in Developing Custom Gradle Types. Here is the implementation of a Minify transform that makes JARs smaller by only keeping certain classes in them. The Minify transform requires the classes to keep as parameters. 
Observe how you can obtain the parameters by TransformAction.getParameters() in the transform() method. The implementation of the transform() method requests a location for the minified JAR by using TransformOutputs.file() and then creates the minified JAR at this location. abstract class Minify implements TransformAction<Parameters> { // (1) interface Parameters extends TransformParameters { // (2) @Input Map<String, Set<String>> getKeepClassesByArtifact() void setKeepClassesByArtifact(Map<String, Set<String>> keepClasses) } @PathSensitive(PathSensitivity.NAME_ONLY) @InputArtifact abstract Provider<FileSystemLocation> getInputArtifact() @Override void transform(TransformOutputs outputs) { def fileName = inputArtifact.get().asFile.name for (entry in parameters.keepClassesByArtifact) { // (3) if (fileName.startsWith(entry.key)) { def void minify(File artifact, Set<String> keepClasses, File jarFile) { println "Minifying ${artifact.name}" // Implementation ... } } abstract class Minify : TransformAction<Minify.Parameters> { // (1) interface Parameters : TransformParameters { // (2) @get:Input var keepClassesByArtifact: Map<String, Set<String>> } @get:PathSensitive(PathSensitivity.NAME_ONLY) @get:InputArtifact abstract val inputArtifact: Provider<FileSystemLocation> override fun transform(outputs: TransformOutputs) { val fileName = inputArtifact.get().asFile.name for (entry in parameters.keepClassesByArtifact) { // (3) if (fileName.startsWith(entry.key)) { val fun minify(artifact: File, keepClasses: Set<String>, jarFile: File) { println("Minifying ${artifact.name}") // Implementation ... } } Declare the parameter type Interface for the transform parameters Use the parameters Use the unchanged input artifact when not minification is required Remember that the input artifact is a dependency, which may have its own dependencies. If your artifact transform needs access to those transitive dependencies, it can declare an abstract getter returning a FileCollection and annotate it with @InputArtifactDependencies. When your transform runs, Gradle will inject the transitive dependencies into that FileCollection property by implementing the getter. Note that using input artifact dependencies in a transform has performance implications, only inject them when you really need them. Moreover, artifact transforms can make use of the build cache for their outputs. To enable the build cache for an artifact transform, add the @CacheableTransform annotation on the action class. For cacheable transforms, you must annotate its @InputArtifact property — and any property marked with @InputArtifactDependencies — with normalization annotations such as @PathSensitive. The following example shows a more complicated transforms. It moves some selected classes of a JAR to a different package, rewriting the byte code of the moved classes and all classes using the moved classes (class relocation). In order to determine the classes to relocate, it looks at the packages of the input artifact and the dependencies of the input artifact. It also does not relocate packages contained in JAR files in an external classpath. 
@CacheableTransform // (1) abstract class ClassRelocator implements TransformAction<Parameters> { interface Parameters extends TransformParameters { // (2) @CompileClasspath // (3) ConfigurableFileCollection getExternalClasspath() @Input Property<String> getExcludedPackage() } @Classpath // (4) @InputArtifact abstract Provider<FileSystemLocation> getPrimaryInput() @CompileClasspath @InputArtifactDependencies // (5) abstract FileCollection getDependencies() @Override void transform(TransformOutputs outputs) { def primaryInputFile = primaryInput.get().asFile if (parameters.externalClasspath.contains(primaryInput)) { // (6) outputs.file(primaryInput) } else { def baseName = primaryInputFile.name.substring(0, primaryInputFile.name.length - 4) relocateJar(outputs.file("$baseName-relocated.jar")) } } private relocateJar(File output) { // implementation... def relocatedPackages = (dependencies.collectMany { readPackages(it) } + readPackages(primaryInput.get().asFile)) as Set def nonRelocatedPackages = parameters.externalClasspath.collectMany { readPackages(it) } def relocations = (relocatedPackages - nonRelocatedPackages).collect { packageName -> def toPackage = "relocated.$packageName" println("$packageName -> $toPackage") new Relocation(packageName, toPackage) } new JarRelocator(primaryInput.get().asFile, output, relocations).run() } } @CacheableTransform // (1) abstract class ClassRelocator : TransformAction<ClassRelocator.Parameters> { interface Parameters : TransformParameters { // (2) @get:CompileClasspath // (3) val externalClasspath: ConfigurableFileCollection @get:Input val excludedPackage: Property<String> } @get:Classpath // (4) @get:InputArtifact abstract val primaryInput: Provider<FileSystemLocation> @get:CompileClasspath @get:InputArtifactDependencies // (5) abstract val dependencies: FileCollection override fun transform(outputs: TransformOutputs) { val primaryInputFile = primaryInput.get().asFile if (parameters.externalClasspath.contains(primaryInput)) { // (6) outputs.file(primaryInput) } else { val baseName = primaryInputFile.name.substring(0, primaryInputFile.name.length - 4) relocateJar(outputs.file("$baseName-relocated.jar")) } } private fun relocateJar(output: File) { // implementation... val relocatedPackages = (dependencies.flatMap { it.readPackages() } + primaryInput.get().asFile.readPackages()).toSet() val nonRelocatedPackages = parameters.externalClasspath.flatMap { it.readPackages() } val relocations = (relocatedPackages - nonRelocatedPackages).map { packageName -> val toPackage = "relocated.$packageName" println("$packageName -> $toPackage") Relocation(packageName, toPackage) } JarRelocator(primaryInput.get().asFile, output, relocations).run() } } Declare the transform cacheable Interface for the transform parameters Declare input type for each parameter Declare a normalization for the input artifact Inject the input artifact dependencies Use the parameters Registering artifact transforms You need to register the artifact transform actions, providing parameters if necessary, so that they can be selected when resolving dependencies. In order to register an artifact transform, you must use registerTransform() within the dependencies {} block. There are a few points to consider when using registerTransform(): The fromand toattributes are required. The transform action itself can have configuration options. You can configure them with the parameters {}block. You must register the transform on the project that has the configuration that will be resolved. 
Registering artifact transforms

You need to register the artifact transform actions, providing parameters if necessary, so that they can be selected when resolving dependencies. To register an artifact transform, use registerTransform() within the dependencies {} block. There are a few points to consider when using registerTransform():

- The from and to attributes are required.
- The transform action itself can have configuration options, which you configure in the parameters {} block.
- You must register the transform on the project that has the configuration that will be resolved.

You can supply any type implementing TransformAction to the registerTransform() method. For example, imagine you want to unpack some dependencies and put the unpacked directories and files on the classpath. You can do so by registering an artifact transform action of type Unzip, as shown here:

Groovy:

    def artifactType = Attribute.of('artifactType', String)

    dependencies {
        registerTransform(Unzip) {
            from.attribute(artifactType, 'jar')
            to.attribute(artifactType, 'java-classes-directory')
        }
    }

Kotlin:

    val artifactType = Attribute.of("artifactType", String::class.java)

    dependencies {
        registerTransform(Unzip::class) {
            from.attribute(artifactType, "jar")
            to.attribute(artifactType, "java-classes-directory")
        }
    }

Another example: suppose you want to minify JARs by keeping only selected class files. Note the use of the parameters {} block to pass the classes to keep in the minified JARs to the Minify transform.

Groovy:

    def artifactType = Attribute.of('artifactType', String)
    def minified = Attribute.of('minified', Boolean)
    def keepPatterns = [
        "guava": [
            "com.google.common.base.Optional",
            "com.google.common.base.AbstractIterator"
        ] as Set
    ]

    dependencies {
        registerTransform(Minify) {
            from.attribute(minified, false).attribute(artifactType, "jar")
            to.attribute(minified, true).attribute(artifactType, "jar")
            parameters {
                keepClassesByArtifact = keepPatterns
            }
        }
    }

Kotlin:

    val artifactType = Attribute.of("artifactType", String::class.java)
    val minified = Attribute.of("minified", Boolean::class.javaObjectType)
    val keepPatterns = mapOf(
        "guava" to setOf(
            "com.google.common.base.Optional",
            "com.google.common.base.AbstractIterator"
        )
    )

    dependencies {
        registerTransform(Minify::class) {
            from.attribute(minified, false).attribute(artifactType, "jar")
            to.attribute(minified, true).attribute(artifactType, "jar")
            parameters {
                keepClassesByArtifact = keepPatterns
            }
        }
    }

Implementing incremental artifact transforms

Similar to incremental tasks, artifact transforms can avoid work by processing only the files that changed since the last execution. This is done by using the InputChanges interface. For artifact transforms, only the input artifact is an incremental input, so the transform can only query for changes there. To use InputChanges in the transform action, inject it into the action. For more information on how to use InputChanges, see the corresponding documentation for incremental tasks.
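Transforms that need no configuration declare TransformParameters.None as their parameter type and are registered without a parameters {} block. A minimal Kotlin DSL sketch for registering the CountLoc action shown in the next listing; the "java-source-directory" and "loc" artifact-type values are illustrative assumptions, not values defined by Gradle:

    val artifactType = Attribute.of("artifactType", String::class.java)

    dependencies {
        // Sketch only: the artifactType values here are placeholders chosen for
        // illustration; pick whatever attribute values fit your build.
        registerTransform(CountLoc::class) {
            from.attribute(artifactType, "java-source-directory")
            to.attribute(artifactType, "loc")
        }
    }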
Here is an example of an incremental transform that counts the lines of code in Java source files:

Groovy:

    abstract class CountLoc implements TransformAction<TransformParameters.None> {

        @Inject                                                          // (1)
        abstract InputChanges getInputChanges()

        @PathSensitive(PathSensitivity.RELATIVE)
        @InputArtifact
        abstract Provider<FileSystemLocation> getInput()

        @Override
        void transform(TransformOutputs outputs) {
            def outputDir = outputs.dir("${input.get().asFile.name}.loc")
            println("Running transform on ${input.get().asFile.name}, incremental: ${inputChanges.incremental}")
            inputChanges.getFileChanges(input).forEach { change ->       // (2)
                def changedFile = change.file
                if (change.fileType != FileType.FILE) {
                    return
                }
                def outputLocation = new File(outputDir, "${change.normalizedPath}.loc")
                switch (change.changeType) {
                    case ADDED:
                    case MODIFIED:
                        println("Processing file ${changedFile.name}")
                        outputLocation.parentFile.mkdirs()
                        outputLocation.text = changedFile.readLines().size()
                        break
                    case REMOVED:
                        println("Removing leftover output file ${outputLocation.name}")
                        outputLocation.delete()
                        break
                }
            }
        }
    }

Kotlin:

    abstract class CountLoc : TransformAction<TransformParameters.None> {

        @get:Inject                                                      // (1)
        abstract val inputChanges: InputChanges

        @get:PathSensitive(PathSensitivity.RELATIVE)
        @get:InputArtifact
        abstract val input: Provider<FileSystemLocation>

        override fun transform(outputs: TransformOutputs) {
            val outputDir = outputs.dir("${input.get().asFile.name}.loc")
            println("Running transform on ${input.get().asFile.name}, incremental: ${inputChanges.isIncremental}")
            inputChanges.getFileChanges(input).forEach { change ->        // (2)
                val changedFile = change.file
                if (change.fileType != FileType.FILE) {
                    return@forEach
                }
                val outputLocation = outputDir.resolve("${change.normalizedPath}.loc")
                when (change.changeType) {
                    ChangeType.ADDED, ChangeType.MODIFIED -> {
                        println("Processing file ${changedFile.name}")
                        outputLocation.parentFile.mkdirs()
                        outputLocation.writeText(changedFile.readLines().size.toString())
                    }
                    ChangeType.REMOVED -> {
                        println("Removing leftover output file ${outputLocation.name}")
                        outputLocation.delete()
                    }
                }
            }
        }
    }

(1) Inject InputChanges
(2) Query for changes in the input artifact
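To see a transform like CountLoc actually run, a consumer resolves an artifact view that requests the attribute the transform produces. Here is a minimal Kotlin DSL sketch, assuming CountLoc was registered to produce the illustrative "loc" artifact type from the registration sketch above and that a runtimeClasspath configuration exists (as it does for JVM projects); the task name is hypothetical.

    val artifactType = Attribute.of("artifactType", String::class.java)

    // Sketch only: resolve an artifact view that asks for the hypothetical "loc"
    // artifact type; Gradle runs the registered transform while building the view.
    val locOutputs = configurations.named("runtimeClasspath").get()
        .incoming
        .artifactView {
            attributes.attribute(artifactType, "loc")
        }
        .files

    tasks.register("countLoc") {
        doLast {
            locOutputs.forEach { println(it) }
        }
    }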
https://docs.gradle.org/current/userguide/dependency_management_attribute_based_matching.html
able.)) Express Delivery...is not available today (just normal delivery) The Only Newspaper That Is Not Controlled By The Cabal Who Are You What Are You Do- AAAAAA May, 4th 2013 - May the Fourth be with you! • Issue 182 • Sponsered by (and biased towards) the Rebel Alliance. This is not "Wednesday or Thursday" Are you back, then? Or did you just find a cyber-cafe in your tropical vacation paradise? Spıke Ѧ 01:21 5-May-13 - I'm back now, spent Thursday riding in the car (4 or 5 hour drive) back to my parents house, spent Thursday night over there since it was late. On Friday my uncle came in from New Jersey to visit us so I stayed around at my parents house long enough to see him (my parents live about an hour an a half drive from my place), and by the time he got in and we finished dinner, it was late so I spent Friday night over. My mom went out to an event on Saturday with her sorority (yes, it's a non-collegiate sorority for "education and entertainment purposes") so I stuck around until she got back so I could say goodbye, and in the meantime borrowed her computer. Also, yes, there was a computer room where we were staying near the ocean (Long Shores WA)and I did check in a couple times but didn't log in, looked like everything was under control when I checked in for about a half hour at a time. Thought briefly about logging in to make a forum comment but decided it wasn't important. There were only two computers for the whole resort building though, so I couldn't stay on those computers too long. But yes, now I'm back. And my cat is happy to see me. -- Simsilikesims(♀UN) Talk here. 04:59, May 5, 2013 (UTC) Hello. Welcome:03, May 5, 2013 (UTC) - Thanks, it's good to be back, where I can goof around on my computer AND watch TV at the same time, and have access to all my mp3's. -- Simsilikesims(♀UN) Talk here. 06:17, May 5, 2013 (UTC) I have always said that pristine beaches and fascinating local culture and cuisine are overrated! (Yes, I have been around computers when the concept of "background processing" while you do work was sorcery, and that multiple windows with moving contents was unthinkable.) The joint looks like it's lousy with Admins today, which is good, because I am in my third of three all-day sessions working a baseball tournament. No such major distractions between now and the start of our tiny regular summer season in June (which I don't work but only watch...depending on who quits between now and then). Spıke Ѧ 10:59 5-May-13 Ban Patrol Why list an IP on Ban Patrol for creating the one-word article ("Hello.") and then ban him yourself? Complete paperwork? The first thing I thought of when I saw him and deleted the one-word article was our "Hello. And goodbye" vandal whom I took care of at SpamRegex. I suspect he can easily change IPs anyway. Spıke Ѧ 01:39 6-May-13 - I wasn't the one who listed him on Ban Patrol. I was tempted to leave him be, but I decided he could stand to read HTBFANJS and learn a quick lesson. If he is the "Hello. And goodbye" vandal, he more than likely could change IPs, but that is something to be dealt with when it happens. -- Simsilikesims(♀UN) Talk here. 01:50, May 6, 2013 (UTC) I see; I misread it, as it was you who provided the details. No problem and no harm done by the two-hour ban. Spıke Ѧ 02:15 6-May-13 My new friend Tonight's vandal has identified himself as the site's old "Juicy taste of dia**hea" vandal, who predates me. There are defenses against him everywhere, all of which assume he will not alter his attack strategy. 
Rollback doesn't work when he hits the same page from two different IPs; there are about two dozen of these left in RecentChanges/hide-patrolled. But my T-Mobile service lets me work at only about one-twentieth of his pace. Meanwhile, another Anon is trying to soften up ScottPat for a move to the Fork. I suspect my old mentor MrN9000, as he behaves as a dick while appealing to me personally not to do the same in response. Sleazy, like everything else about the fork. Sleazier than Wikia. Spıke Ѧ 02:01 8-May-13 - It could be ANYONE from the fork who left the message on ScottPat's page. Unfortunately, this "new Uncyclopedia" is beginning to seem more and more like "New Coke" (if you remember that marketing fiasco - nobody liked it and they took it off the market). True, the censoring here can be draconian, but at least it keeps people from becoming preoccupied with the thing censored and then that becoming their primary focus. True, the content warning here "breaks the joke", but Wikia has required it due to legal issues I suspect, plus possibly one of their executives got offended by some of the content here and wanted to make it clear that Wikia does not officially endorse any of this. At the fork, they don't have problems with people going over the admins heads and complaining to wikia; thus no content warning there is required, but they are also at risk for potential lawsuits, plus they have their expensive server which is not paid for yet with the donations they have received thus far. One big lawsuit could sink their site easily, we aren't exposed in the same way. The fact that they are still coming here to cherry pick our editors is sleazy indeed, but it also means they aren't attracting enough new users on their own. -- Simsilikesims(♀UN) Talk here. 02:13, May 8, 2013 (UTC) - Whoops, accidentally rolled back your edits to this page while removing the fresh batch of steaming vandalism. I guess I deserve 49 lashes for not being observant enough as to which edits I was reverting. ◄► Tephra ◄► 03:24, May 8, 2013 (UTC) - Would you like your lashes with a wet noodle or a feather? -- Simsilikesims(♀UN) Talk here. 03:25, May 8, 2013 (UTC) - A wad of cash if possible. Also, I don't seem to have the ability to rollback all of the vandal's edits. I rolled back all that I could, but the ones that I haven't done would require me to manually undo them... why would that be? ◄► Tephra ◄► 03:30, May 8, 2013 (UTC) - If someone else (including the vandal using another IP) edits after the vandal, then rollback only works on the last editor of the page, and edits by someone other than the last editor of the page have to be undone manually, or one can go to the history and edit a version of the page prior to the vandalism. ---- Simsilikesims(♀UN) Talk here. 04:15, May 8, 2013 (UTC) - Well I knew that... However I just checked the history of one of the edits and saw that I had reverted an older edit which had been vandalized twice more and reverted twice before I finally reverted the original hit. So yes, you are right, I just hadn't realized the page had been edited four times between my revert and what I was reverting. Whew... I hope that made sense. I am not used dealing with this level of malicious behavior, although once I did have to deal with a coordinated attack from twenty different vandals at once on another wiki where I am a bureaucrat. ◄► Tephra ◄► 04:52, May 8, 2013 (UTC) This UnSignpost may be unsuitable for some viewers! 
The Newspaper Made Entirely From Recycled Internet Memes May, 10th 2013 • Issue 183 • Uncyclopedia set to have a bright future. Sunglasses stocks are running low. Update Otherwall I queried the author about this at User talk:Rpsingh. Would you take a look and decide what action is warranted? Spıke Ѧ 20:25 9-May-13 - I think the ICU on it is sufficient. The name probably isn't really anybody's real name, so I don't think that this is vanity, but it really needs to have more humor in it, not read like a dry biography. After all, this is a humor site, not a fiction site. A hoax about a person who doesn't exist isn't exactly humor material. -- Simsilikesims(♀UN) Talk here. 06:02, May 10, 2013 (UTC) Now blanked by yet another Anon (undone by Graphium, a new user who has done some good policing). As I told him, protecting this article against the too-many-Anons-spoiling-the-soup is not a solution; that would also freeze out Rpsingh for a couple more days. Speaking of food again, I recently went to WP:Cooking utensils to verify (for the evolving Cap'n Crunch) that the military weapon I had in mind was called a whisk. For some reason the entire table at Wikipedia struck me as hilarious and desperately in need of satire, for the benefit of everyone from prissy cooks to sloppy cooks to malevolent cooks. Spıke Ѧ 10:09 10-May-13 - If you want to make an article on Cooking Utensils, go right ahead; I would consider myself in the realm of lazy cooks (heat up a microwave dinner and call it done). But yes, the topic is ripe for satire. I don't know how it would be done without resorting to a list-like format though. -- Simsilikesims(♀UN) Talk here. 17:51, May 10, 2013 (UTC) Hi, is the email in your preferences right? I've mailed a couple of time recently about some srs stuff (well, a bit). Can you let me know if there's another address I should use? Many thanks -- sannse (talk) 23:50, May 9, 2013 (UTC) - The email @yahoo.com should be the correct one, stuff gets lost in there though because I am not checking it constantly and I get more offers than I need. -- Simsilikesims(♀UN) Talk here. 05:25, May 10, 2013 (UTC) - PS Found your email and replied to it. -- Simsilikesims(♀UN) Talk here. 17:52, May 10, 2013 (UTC) Autopatrolled for Anton199 I agree with you that Anton is a good team player. I had not given him Autopatrolled because his English is still spotty enough that I like to check his edits. However, he mostly edits a few specific projects in collaboration with ScottPat, so this is not a big problem. Spıke Ѧ 19:11 10-May-13 - I do not usually have my preferences set to see only unpatrolled edits on Recent Changes, and I like to see what our established editors are up to, so I can still check on these edits from time to time. Usually someone will come along and fix grammar and spelling eventually, if I don't happen to see it. I'm sorry if that someone happens to be you, I don't like to see you having to do all the work, but as you can understand, time and energy are limited and I do like to do things outside this site. Now that I'm back, and not working full-time, I should be on the site more often (when I'm not playing Facebook games or editing Castleville wiki which has basically one active editor, which is me, plus an admin (not me). )---- Simsilikesims(♀UN) Talk here. 19:20, May 10, 2013 (UTC) Wow: one Indian and one Chief. That could be us except that our Chief saw fit to elect two new Chiefs. 
I only filter for unpatrolled when I am willing to turn on JavaScript, which slows my T-Mobile USB suppository to its knees. Snippy often patrols overnight, and I've given ScottPat some instruction; however, during my baseball absence last weekend, we fell a weekend behind. Spıke Ѧ 20:57 10-May-13 Thank you! I'm probably the first active user to log on for a while since you've been on. I just woke up, checked my Uncyc. watchlist and it is full of pages that you have reverted an IP attack on. Thanks you have done a tremendous job and I still see more pages on my watchlist that need reverting so I'll help you:14, May 16, 2013 (UTC) Please ignore above as I pretty sure the reverting isn't you but a bot of some type. If you have cometo this page to thank Sims then you have misunderstood the situation like I did. The bot with Simsie's name is simply reverting the last IP edit on each page however below that there are many other vandal edits. All the pages it says Simsie has reverted haven't actually been fixed. More on village dump in new forum.:37, May 16, 2013 (UTC) - The vandalism is so extensive that I have started by rolling back edits of the last vandal to visit the page. The rollback tool apparently has its limits, had to manually rollback more edits beyond that. I am slowly working through the unpatrolled edits, and hope to have all this cleaned up eventually. Will check the forum. -- Simsilikesims(♀UN) Talk here. 06:40, May 16, 2013 (UTC) That explains why. I presumed that the rollbacks were a bot as rolling it back to the last IP hasn't worked and there are many that you have edited that still have the vandalism:46, May 16, 2013 (UTC) PS - Thanks for the work you've done to revert the vandals., May 16, 2013 (UTC) Vandal repair This (1800) works for me (but only because I'm at McDonald's drinking coffee. 1500 doesn't work. Spıke Ѧ 00:43 17-May-13 - I have been hiding unpatrolled edits in my preferences in order to work on this vandal issue. However, the default is 500, so I took a peek at the 1800 to see if progress is being made. Unfortunately, no light at the end of the tunnel yet. -- Simsilikesims(♀UN) Talk here. 00:49, May 17, 2013 (UTC) I don't agree; somewhere between 1500 and 1800 the pre-vandalism unpatrolled changes appear, and 1800 gives the same list as 2000 (or at least it ends in the same place). Spıke Ѧ 00:56 17-May-13 I'm sorry, that's not true; am now seeing a tunnel past the "end of the tunnel." Spıke Ѧ 00:58 17-May-13 Which means some of the pages I "patrolled" to keep them from popping back up as "unpatrolled" will do so anyway later. Spıke Ѧ 01:29 17-May-13 - At least it takes me less time when I realize that you have already fixed a page I am patrolling when I go in to fix it. -- Simsilikesims(♀UN) Talk here. 01:37, May 17, 2013 (UTC) We are down under 1500 now. Spıke Ѧ 03:35 20-May-13 - Yes, thank heavens the show 1800 tool now shows pre-vandal edits to be patrolled. Next goal: to get it under 500. -- Simsilikesims(♀UN) Talk here. 03:37, May 20, 2013 (UTC) I got it down to 650--nothing left but the Forums. Spıke Ѧ 21:43 20-May-13 - I now have patrolled all the rest of the vandal edits from May 15th. There are still some pre-vandal edits to patrol from May 15th, and May 16th to go. -- Simsilikesims(♀UN) Talk here. 00:28, May 21, 2013 (UTC) You are awesome! Your PC is too. But mine was yard-sale cheap. 
Spıke Ѧ 00:35 21-May-13 - I've got two like that in my back room (one that runs XP and still goes online occasionally, and one that runs Win 98 and never goes online anymore). Both hard drives of those computers are full or nearly so, however. The XP in the back room has a hard drive so full I can't install Service Pack 3 on it either. The notebook I'm using now I got in January last year, so its not spanking new, but I don't have to worry about the confusing new Windows 8 interface either. I like computers to act like computers, not like tablets. -- Simsilikesims(♀UN) Talk here. 00:41, May 21, 2013 (UTC) "Like computers, not like cartoons" is the way I would put it. Spıke Ѧ 01:00 21-May-13 Hey, I see a bunch of "Unpatrolled" flags still on May 15th edits. As that sign by the toilet says, The job isn't done until the paperwork is complete! Spıke Ѧ 17:20 21-May-13 - I just finished patrolling the rest of the pre-vandal edits on May 15th just now - if you are still showing unpatrolled edits from that date when you refresh the page, something isn't in sync here. -- Simsilikesims(♀UN) Talk here. 19:00, May 21, 2013 (UTC) The difference is between "patrolling the edits" and marking the edits as patrolled. [1] If you are saying you are sure the vandalism from May 15th is undone, I can go through and clear the flags. But you have the sexier computer. Spıke Ѧ 19:11 21-May-13 - I did mark them as patrolled, and they show marked on my computer. Strange. Also, all forums that are protected have been patrolled and marked patrolled. I don't know why your computer doesn't show them as marked. Maybe I should experiment with another browser to see if I see unmarked results on another browser. -- Simsilikesims(♀UN) Talk here. 19:14, May 21, 2013 (UTC) You marked what as patrolled? and how? If you used the "patrol these changes" JavaScript button for RecentChanges, you are only marking the edits that appear on the report, and if the report was not long enough to show the entire vandal attack, it is earlier hits that are now coming up in the report. Spıke Ѧ 19:19 21-May-13 - Yes, I did use the "patrol these changes" JavaScript button. But when I refresh my page to show recent changes, I am not showing any new changes from the 15th to mark patrolled. I plan to experiment with using Chrome instead of Firefox to see if I can see any "unpatrolled" changes that are coming up as unmarked. -- Simsilikesims(♀UN) Talk here. 19:23, May 21, 2013 (UTC) Using a different browser might render the report differently on your screen (colors, fonts) but will not change the information in the report; and there is no reason you should not be getting the same report I am if you type. There are screenfuls of edits between 17:26 and 18:34 (your time) on the 15th that are unpatrolled, and on several of these, the page is not completely repaired. Spıke Ѧ 19:46 21-May-13 - I tried using Chrome instead of Firefox, but am still not showing any unpatrolled edits from the 15th. Which pages are you showing? -- Simsilikesims(♀UN) Talk here. 21:06, May 21, 2013 (UTC) Again, your particular browser renders a report for display on your screen; but what's in the report is done at Wikia. The first three relevant pages in my report: All show unpatrolled edits at 18:34 your time. The pages themselves contain vandalism at the top. There are hundreds more. PS--I mentioned the current confusion at User talk:Furry. Spıke Ѧ 21:24 21-May-13 PPS--Hmm, if you have your account set to display UTC rather than local time, these would be on the 16th. 
Spıke Ѧ 21:25 21-May-13 - That would explain everything. These are indeed showing up as the 16th rather than the 15th, and I do have my preferences set to display UTC rather than local time. I still have lots to do from the 16th (or the 15th local time), including those pages you mentioned above. -- Simsilikesims(♀UN) Talk here. 21:28, May 21, 2013 (UTC) - UPDATE: I have got the vandal edits down to only 00:49 and 00:50 from the 16th, hopefully throttling should prevent so many edits from occurring at once from IP's in the future. -- Simsilikesims(♀UN) Talk here. 17:14, May 22, 2013 (UTC) Yes, throttling is working so well that even I was able to fix stuff on Monday faster than our vandal was able to break stuff. Spıke Ѧ 17:25 22-May-13c - So glad to hear it! I think I finally got the last of that vandal mess cleaned up now. Routine patrolling can resume as normal. -- Simsilikesims(♀UN) Talk here. 18:23, May 22, 2013 (UTC) Confirming that. Thanks again. Spıke Ѧ 18:46 22-May-13 What do think about the quality of this article? In its construction state, what do you think about the humour, the structure and the images so far in my article about League of Legends? I will send it in for a real review when it's finished. (Saddex (talk) 12:49, May 17, 2013 (UTC)) - User:SPIKE attempted to review it, but he don't know to much about gaming. So, even in its state of construction, how would you rate the article, on 0-50? (Saddex (talk) 19:23, May 17, 2013 (UTC)) - For a proper review out of 50 please put this article on pee review. Then ask Simsie to review:11, May 17, 2013 (UTC) - This article has a lot of redlinks. It is useless to compare it to Dota2 when the reader doesn't know what Dota2 is. Also, I am not familiar with this particular game, though I have some idea about multiplayer games I have not actually played World of Warcraft for instance, mainly my experience with multiplayer games is with those originating on Facebook, plus some Ragnarok a few years ago. I am also not familiar with the expression "GLHF" and without this the parody of this expression falls flat. We do have an article on N00B that you can link to in your article, and I'm not sure exactly what you are parodying when you refer to "Op". In IRC, this refers to a channel operator (admin), is it similar? I do recall a two player game I played years ago on a Macintosh called "Don't Fence Me In" which has similar gameplay to the game you are suggesting here - it was on a black and white screen, each player controlled a moving line, and the lines were constantly moving and expanding, you could control the direction of movement, and the idea was to make sure your opponent ran out of space before you did. This was back in the 1980's of course. So thanks for bringing back the memories there, but your article does need some more work to make it funnier. -- Simsilikesims(♀UN) Talk here. 01:09, May 18, 2013 (UTC) - The coloured lines was a completely random idea by me, and isn't even close to the gameplay of the real game. GL HF stands for "Good Luck Have Fun". The "overpowered;op" thing is just what many people blames when they can't admit they failed and got killed by another player. I will add more about the GL HF thing, such as "some dumb people thinks it means "Good luck have fun...". I think LoL players will get a good laugh. Nobody here has said anything of the images yet though. (Saddex (talk) 01:21, May 18, 2013 (UTC)) - Rather than saying that "dumb people", it would be funnier if you said "ignorant people". 
The former means having a low IQ, the latter means simply lacking knowledge or being unaware. You don't want to insult some of your target audience. -- Simsilikesims(♀UN) Talk here. 01:27, May 18, 2013 (UTC) Major reconstruction I decided to rewrite large parts of the article. It's still not finished, but what do you think? (Saddex (talk) 21:57, May 19, 2013 (UTC)) - Starting the article out with an overused "yo mama" joke doesn't help it much. The hoax (I had to check the wikipedia article on League of Legends) about the development team splitting off from Blizzard is a nice touch, but not sure if it is exactly funny to nonplayers of the game. I do like how you explained what this Dota2 business was about. The hard thing here will be to try to make the article appeal to both players and nonplayers, without making it too long. If it was going to be a feature, it would have to be hilariously funny to both players and nonplayers, which could prove impossible in this case. But I think you are on the right track now, since those who do not play League of Legends are unlikely to look up the article unless they get it by roll of the die by hitting "random article". For the future, you will probably have to monitor the article to make sure that IP's don't (A)vandalize the article, (B) add vanity to the article, and (C) try to modify the article to reflect the truth. Especially C, though all articles have issues with A and B. -- Simsilikesims(♀UN) Talk here. 00:06, May 20, 2013 (UTC) Trombone Anon uploaded this in one gulp; it is in bad shape and comes complete with a {{Fix}} tag. Where did it come from? the mirror site? Spıke Ѧ 19:28 17-May-13 - Suspicion confirmed; it did indeed come from carlb's mirror. Apparently this article had a fix tag that expired. That, or it got VFD. -- Simsilikesims(♀UN) Talk here. 00:57, May 18, 2013 (UTC) Yup, huff log shows suggests that the Chief deleted it last August. Can't plant a deleted article here and abandon it; it's gone. Separately, did you get my email? Wolverhampton is with us right now, but impeded by the throttle and Frosty is handling him in real time. Romartus and ScottPat repaired many articles, probably not most of the old Forums, but lots of patrol flags are still set. Spıke Ѧ 01:06 18-May-13 Transistor As you saw, Transistor redirects to MOSFET transistor. MOSFET (metal-on-silicon field effect transistor) adds utterly nothing to the humor of that article (and is dated; MOS gave way to complementary-MOS, or CMOS, in the 1980s). What if I move the meat to Transistor, ditch "MOSFET", and huff the old page? Spıke Ѧ 02:03 18-May-13 - That would be fine with me; I probably wouldn't have thought to look for "MOSFET transistor", and besides, if it gave way to CMOS in the 80s, that was almost before my time. I don't have an extensive electronics background, besides. -- Simsilikesims(♀UN) Talk here. 02:09, May 18, 2013 (UTC) Denza252 Did you notice on User talk:Romartus that Denza252 claims to have just returned from Taiwan and implies that the brouhaha with IRC pranks and sockpuppets was all an impostor? Spıke Ѧ 02:38 18-May-13 - Yes, I did. Either he is joking, or he needs to create a new account. He begged me on IRC to unblock him, so I gave him 2 more weeks, down from infinity, to think about fixing his signature and so on. I will go respond on that page. -- Simsilikesims(♀UN) Talk here. 03:41, May 18, 2013 (UTC) - There needs to be a film made about this. 
It is such a complicated saga no one understands what the heck is going:12, May 18, 2013 (UTC) - Agreed, and I think it has something to do with the illumanati, or some disgruntled irc server monkey... I can't be arsed to find out. Oh, and this is Denza252's new account, so, direct all queries to me --The Slayer of Zaramoth DungeonSiegeAddict510 02:45, May 23, 2013 (UTC) The UnSignpost has arrived...Quick hide! The Newspaper the Whole Family Must Enjoy! May, 18th 2013 • Issue 184 • Vandalpedia strikes back! Luckily the Jedi will return. Active admins I replaced my name in the list at UN:AA with my signature file a while ago, by way of personalization, and I invite you to do the same. Especially, it identifies you as a lady and some readers contemplating whom to make a request of might be more comfortable with that. Spıke Ѧ 23:12 18-May-13 - I have done so, now those looking for help via that page will also find the link to my talk page via my signature. -- Simsilikesims(♀UN) Talk here. 02:25, May 19, 2013 (UTC) Genius Factor Games In my opinion, {{Construction}} is too good for this. Anon saddled us with an article solely for the joy of ranting about a game company and calling Ted Nugent a "fat piece of shit." It seems clear the author will never improve it, it doesn't belong in mainspace, and there is no way to userspace it. I don't want to delete it over your tag, but please! it's beggin' for it! TheDarthMoogle agreed but put it on VFD, for which he got a 2-hour technical foul. Spıke Ѧ 12:29 20-May-13 - Nearly 24 hours have elapsed with no work done on it, so I am inclined to agree. Since there are three users now that agree that this article is below standards and unlikely to improve, (SPIKE, DarthMoogle and myself) I will huff it myself.---- Simsilikesims(♀UN) Talk here. 21:16, May 20, 2013 (UTC) User Uncycloperson I criticized new user Uncycloperson for edits that were excessively based on Uncyclopedia memes. He appended a CONGRATULATIONS template to his talk page which struck me as an upright middle finger. Later, he uploaded a photo of someone he "met once" who taught him a sad lesson about web hook-ups, at File:Random Prissy Rich Girl That Can Be Used.jpg. I deleted it with a summary that photos of non-famous people indeed Cannot Be Used. His next work was Gay Rights, which Mhaille saw fit to delete. He is now on Mason-Dixon line, a listy and unfunny collection of stereotypes about redneck racist Southerners and "the sudden urge to lynch a nigger" that has not improved since you tagged it. Not suggesting you do anything except keep an eye on him. Spıke Ѧ 01:00 21-May-13 - That Congrats template is a result of the article Scam and one of the tricks I added into it. I doubt it was added there by any sense of spite. The rest of his edits come across as pretty basic noobishness. I'd suggest give him a prod and a poke to try and get him to ease off a few of his excesses, but not much more at this stage. • Puppy's talk page • 02:25 21 May 2013 On the contrary, I tend to think things mean things, and not necessarily what you meant when you wrote it. Meanwhile, there are more interesting people to prod and poke. By the way, did you notice that Wolverhampton returned a few minutes ago, and that I was able to ban and revert, even working through T-Mobile? Spıke Ѧ 02:30 21-May-13 PS--Thank you for giving him a poke yourself. Wolvy, by the way, is no longer editing articles at the start; and had some of his edits marked bot edits. What does this mean? and how does Anon get to run a bot here? 
Spıke Ѧ 02:38 21-May-13 - I know as much as you do on Wolfgang. Maybe it's time to bring in VSTF? As for things mean things - prefer assume good faith. The poke will allow him to indicate more one way or t'other. • Puppy's talk page • 02:42 21 May 2013 I take your point. Sannse is anxious that we invite in VSTF. I replied to email today that we appreciated the help Furry gave us during the previous attack. Not sure how it would be interpreted if VSTF's patrolling extended to matters of content and taste, and were done in a way we would not have done. Spıke Ѧ 02:46 21-May-13 - From what I've seen elsewhere they're respectful of the way things are done locally. They work on obvious trolls, so taste shouldn't be an issue. And most of the members I've chatted to in the past are approachable, so if things are done poorly they're open to listen. I say bring them in - if it ends up being an issue we can always ask that they leave again. • Puppy's talk page • 02:53 21 May 2013 - Also - Hi Sims! • Puppy's talk page • 02:54 21 May 2013 Well, it is late here, and the baseball game was not interesting from the start (from the local point of view). Please keep an eye on the site. Spıke Ѧ 02:59 21-May-13 Thanks Hey simsie, thanks for unbanning me. A care package of 1 pie has been dispatched to you. I will resume editing over the summer, expect small edits right now. --The Slayer of Zaramoth DungeonSiegeAddict510 03:06, May 22, 2013 (UTC) - Congrats on fixing your signature (FINALLY). -- Simsilikesims(♀UN) Talk here. 03:09, May 22, 2013 (UTC) Sacrafice someone? Sacrafice [sic] the most spelling-deficient Uncyclopedian? After I just nominated him for Writer of the Month??? Spıke Ѧ 00:43 23-May-13 - Actually, that was just a joke, meant in the spirit of the joke just before it. Hopefully nobody's sacrificing anybody. Besides, I hadn't noticed ScottPat's spelling mistakes. Shabidoo's spelling mistakes were a little too obvious in the sentence prior to my forum post. And yes, I really was district alternative in my school's spelling bee years ago. -- Simsilikesims(♀UN) Talk here. 00:50, May 23, 2013 (UTC) - I got it. ScottPat must not have "apollagised" to you yet. (So, no one ever taught you not to write, "That isn't me"?) Spıke Ѧ 01:19 23-May-13 - Nope, and nope. Plus I have probably learned some bad habits along the way. -- Simsilikesims(♀UN) Talk here. 02:58, May 23, 2013 (UTC) - Did Spike just spell something in English-English? Wow! Well there's a first time for everything. Thanks for voting for VFH:Emu War Simsie and I'm glad someone else finds the topic of car insurance amusing. (Do you have does bloody annoying car insurance ads on TV?). Also thanks for voting on writer of the month. I'm not sure where you said sacrafice [sic.] so I haven't a clue what Spike's on about in terms of apolagising.:44, May 23, 2013 (UTC) - PS - And yes my spelling is awful but that is because I write hurrid:47, May 23, 2013 (UTC) - PPS or PSS - I've seen the forum now I thought Simsie wanted to sacrifice Al, May 23, 2013 (UTC) - We do indeed have annoying car insurance ads here. For awhile GEICO was flipping between the gecko and the caveman, and Allstate has someone talking in a bass (or deep tenor) voice like the Allstate ad person does. Nationwide has a commercial where a lady sings "Nationwide is on your side" offkey alongside the Nationwide insurance guy, hence my little addition to the article there. Of course, all this will be outdated in a few years. 
Then there's MetLife with the Peanuts comic strip characters, but wasn't sure where to go with that, nor do I know whether they have offices in the UK. Also, Shabidoo was the one who started all this business about Al being likely to be the first one sacrificed (and he badly misspelled sacrifice), so I thought I would joke about Shabidoo being the one to be sacrificed. Hope this clears things up a little. -- Simsilikesims(♀UN) Talk here. 02:16, May 26, 2013 (UTC) - Yeah thanks. We have none of those specific car insurance adverts mentioned. We have adverts that advertise companies that compare car insurance such as the "Go Compare" opera man jumping out of bushes and singing in people's faces and the "Compare the Meerkat" meerkats from Russia (I know!) telling you not to compare the:43, May 26, 2013 (UTC) Sotir If you are going to do something about this new article, you should first review the entire year-long career of author Maistor310, including Deleted User Contributions. There is a series of photo uploads and articles about obscure Bulgarians no one has ever heard of. The articles have been deleted; the photos should too, and this seems to be more of the same. I first posted to Romartus, as he dealt with this guy more recently; so did Xamralco. Spıke Ѧ 22:15 24-May-13 Review I sent in my League of Legends article for Pee review. More should be added to the 'Bugs' section, however, I don't know what more to add there right now. (Saddex (talk) 23:18, May 25, 2013 (UTC)) VFH Thanks for voting for the Ukraine article and the Boxes article. Much appreciated.:34, May 29, 2013 (UTC) The UnSignpost hath cometh In Pure Russian Fashion, The Newspaper That Reads) Hello I am with you for another couple of hours. I've had a talk with Pennyfeather regarding the crap that gets to remain in mainspace versus the crap that doesn't. Separately, ScottPat and I went to work on sporking and perverting Wikipedia's featured article of the day, which will certainly not be ready for an Uncyclopedia tit-for-tat before tomorrow. Spıke Ѧ 01:47 1-Jun-13 - Thanks, I'll go check out the discussion. I look forward to more good articles on here! -- Simsilikesims(♀UN) Talk here. 01:48, June 1, 2013 (UTC) Also, there is Fresh Meat for you at VFD. Spıke Ѧ 02:37 1-Jun-13 Armenian Federation Where do you think this one was sporked from (all in one Big Gulp)? Do we want it? Spıke Ѧ 18:45 2-Jun-13 A quick question... Am I allowed to slap a welcome template on the new users talk pages, if the admins don't get to them? Just wondering, I won't do anything until I get a response. Not even a peep. --The Slayer of Zaramoth DungeonSiegeAddict510 18:01, June 3, 2013 (UTC) - Yes but please include a warm friendly message and links to the help pages.:07, June 3, 2013 (UTC) - And get the inclusion of {{BASEPAGENAME}} to work, and don't drop names of specific Admins (or that of ScottPat, who isn't an Admin at all and is not some sort of official welcomer), and maybe-just-maybe look at what you've posted and see that it worked correctly before walking away. Spıke Ѧ 19:18 3-Jun-13 - In other words: Recommending specific Admins to new users is not "welcoming"; it is politicking. Spıke Ѧ 19:35 3-Jun-13 - Unless you are recommending me DSA, then I don't mind (do we have an article on False Modesty)? --:00, June 3, 2013 (UTC) - I second Romartus on reccomending me on your welcome page:22, June 3, 2013 (UTC) - Sorry I didn't respond sooner - my internet connection was down Saturday, and I was away on Sunday. 
But ScottPat and Spike are right; (1) Please feel free to put a welcome template on new users talk pages if they haven't been welcomed yet. Usually I use {{subst:Welcome}} to get the BASEPAGENAME to work properly on the welcome template. An alternative I used some time ago is to simply copy and paste the text of the Welcome template from the Welcome template page onto the new user's page. But using subst is a quick shortcut. (2)Avoid name-dropping, you don't speak for the admins even if you are friendly with us and recommending one admin over another or others is bad manners (it is impolite to the other admins). -- Simsilikesims(♀UN) Talk here. 00:19, June 4, 2013 (UTC) Ah thanks for clarifying, and I made it unspecific to any 1 admin. --The Slayer of Zaramoth DungeonSiegeAddict510 00:55, June 4, 2013 (UTC) Banned again! I'm afraid I banned your little bundle-of-joy again. You may wish to intervene. Spıke Ѧ 23:10 4-Jun-13 - I reviewed his contributions, and your notes on his talk page. Looks like n00bishness to me, rather than malicious behavior. I would have warned him rather than blocked him for a week, dealing with userpage templates can be difficult for beginners. Still, he should have read the rules at QVFD, somehow he overlooked the part about the redirect template. As a first time offense, this is something I might ban for one day as a consequence. -- Simsilikesims(♀UN) Talk here. 00:19, June 5, 2013 (UTC) It is more than n00bishness, but less than malevolence: a chronic need for everyone else to pay attention to him, rather than either he or we writing funny stuff. I did warn him with two hours earlier in the day. It is not a first-time offense; remember that I permabanned him (as Denza252) and you brought him back. Do what you like with him, but he is high-maintenance. Spıke Ѧ 00:39 5-Jun-13 - Taking the scorefix hijinx into account, I changed the ban to 2 days from 1 week. I think he will learn in time that to get the respect and attention he wants he will need to either write good funny stuff or do essential site maintenance (undoing unfunny IP edits for instance). We do not have a glut of users here, so I hate to drive one away just because he continues to act n00bish. -- Simsilikesims(♀UN) Talk here. 00:52, June 5, 2013 (UTC) Very well. To be clear, there was no vote-rigging involved here. And he did good work on saving an article on VFD, then on the day in question, casting the decisive 5th vote on another article--but in both cases, with a loud Victory Lap, the latter involving a post at a closed vote. Even money says he will not learn what you expect him to. Spıke Ѧ 01:24, 13:05 5-Jun-13 Er, just to clarify, the edits I'm doing as an IP isn't ban evading, just doing some reverts on spam edits... I'm not touching any article that doesn't need a simple undo. And yes, I do think that the template fiasco was a bit n00bish on my part, but I didn't want to put it in the mainspace... sorry 'bout that --The Slayer of Zaramoth DungeonSiegeAddict510 20:12, June 6, 2013 (UTC) Denza: (1) Don't tell us what ban evasion is. (2) You are not just policing articles but "removing some of my own profanity." (3) You are undoing edits and scolding editors in the Change Summary about their tastes in humor and asserting that you are the owner of the articles they are editing. Editing while banned is certainly ban evasion, and trying to maintain your control over articles is too. Spıke Ѧ 19:56 6-Jun-13 - Er... I should have phrased that better... 
What I should have said was "It wasn't for the purpose of ban evasion." Also, not to defy your authority, but I felt that some of the anon edits were a bit more spammy than you like. In addition, I thought that some content on my WIP article was less funny than I thought before, so I felt I should change it. If I have offended you, I'm sorry --The Slayer of Zaramoth DungeonSiegeAddict510 20:12, June 6, 2013 (UTC) - Consider this a warning: if you edit as an IP while banned, you risk getting your IP banned. If you continue to do the same thing you were banned for, you will definitely have your IP banned. Generally, when banned, don't act as if you "own" an article, since IP's don't own articles. In fact, you don't really "own" anything here, except what is in your userspace, since it is all released under Creative Commons, and may be added to by anyone. When you are unbanned, you can revert edits that don't make sense within the context of the article, or that are unfunny. - What Spike is saying here is, simply take a wiki break while banned. Take some time to read existing articles (notably the Beginner's Guide and the features) rather than edit existing articles. Some of the editors here actually use MS Word or Wordpad to edit articles offline without directly editing the wiki, and they can plan the strategy for the articles that way, though the wiki formatting doesn't necessarily come out well that way. Finally, it is impolite to scold others about their tastes in humor: everyone has different tastes in what they think is funny or not funny. That is why we hold votes to feature or delete articles rather than just have a random admin do it. That is also why we have Pee Review, though we are behind in that department currently. -- Simsilikesims(♀UN) Talk here. 21:52, June 6, 2013 (UTC) User Wakkoswish123 Wakkoswish123 is back, fresh off his ban and back to his old mischief, touting on Talk:Furry a "version 2" of the page, though his change mostly restores some crudeness and some point-of-view from his last edit to it. I reinstated his ban; would you please review the case? Also, we are mostly caught up on patrolling edits, but do you have an opinion on Armenian Federation (see above)? Spıke Ѧ 12:09 5-Jun-13 - I have left my input on the talk page of the Furry article regarding Wakkoswish123's changes. I will go check out the Armenian Federation article. -- Simsilikesims(♀UN) Talk here. 18:53, June 5, 2013 (UTC) - The Armenian Federation article appears to be about the quality of your average Uncyclopedia article. It could be funnier, but it does have some humor to it. It doesn't look like it was copied here from the fork or the mirror (I just looked into that) so it is original to this site as far as I can tell. I am letting it stay without a construction tag. -- Simsilikesims(♀UN) Talk here. 18:57, June 5, 2013 (UTC) Fine on both counts; except: - Wakkoswish123 cannot develop an alternative in userspace. Because I banned him. If you think he'll listen to you, unban him; but please review the ban log: When his last ban expired, he picked up right where he left off. - Regarding your post-edit to Furry: Changing "inhabit" to "can be found inhabiting" is one of those encyclopedia clichés that set me off. Say it simply! Spıke Ѧ 19:04 5-Jun-13 - He is banned for six months, I have a feeling he will return after 6 months. If he hasn't learned his lesson then, he'll get a year. Hopefully he will see the reply on the talk page. 
I will undo my change, since you find it cliched, I didn't see much difference, really in the wording. -- Simsilikesims(♀UN) Talk here. 19:11, June 5, 2013 (UTC) Infobox Myself, I love the infoboxes at the country pages. They are filled with lots of random information. The image wasn't broken, it some sort of bug now on Wikia (I have experienced it on my "main" wiki, but it didn't take too long time until they were visible), just click on the "broken" sign, which will lead you to its page. I will upload a photoshopped flag later. (Saddex (talk) 22:36, June 5, 2013 (UTC)) - There used to be lots of infoboxes on country pages; they were removed because IP's tend to add to the infobox lists, particularly the section on Imports and Exports, ruining the infoboxes effectively. Besides, the infoboxes even became the subject of editing wars, since people couldn't agree what should or shouldn't go in there. Also, in general, avoid editing featured articles like America; they are already good the way they are. Besides, adding a map takes away the focus of the article from the fast food angle and moves it to cartography. If the article had a more general focus, the map would have been ok, but the reader's attention is instantly pulled to the map and away from the text, which is undesirable. -- Simsilikesims(♀UN) Talk here. 22:45, June 5, 2013 (UTC) - Yup. Bizzeebeever's documentation at Template:Infobox notes that the editor can use an Infobox for good or eee-vil. BB also begs the editor that he doesn't have to fill out every single field if he doesn't have something authentically clever and funny to put there. Indeed, the bigger the Infobox the more magnetic the attraction for Anon to add crap to it. - Dittos on editing featured articles. You can bring them up to date but it is presumptuous to think your arrival is going to "make it funny" at long last. - Speaking of adding crap, I reverted virtually all of BetterSkatez2012's edits yesterday, explaining why on his talk page. He is back again, adding listcruft (I think not identical listcruft, but not better listcruft) at Roller coaster with Change Summary: please don't revert. Would you like to take a look? Spıke Ѧ 22:59 5-Jun-13 - However, a flag will still be added. I have planned to add a hamburger and a fat man on a tractor in it. The article was in perfect state? No. The first sentence, before I edited it, called America a "badass country". "Badass" can sometimes be defined as a somewhat positive term. No articles should be pro-subject. They should joke and indirectly insult the subject via jokes, lies and randomness. (Saddex (talk) 23:04, June 5, 2013 (UTC)) - No, some articles can be pro-subject, they don't all have to insult the subject, as long as they are still funny. Take for instance, George W. Bush which deliberately praises the subject, who many think was one of America's worse presidents. Likewise, the article on Michael Jackson deliberately takes the fanboy/fangirl point of view, praising Michael Jackson and being funny by being totally blind to the pedophile charges. Both were collaborations, and were deliberately planned out with a comedy strategy. Pay attention to see if you can determine what comedy strategy was used before editing the article. You can add a flag to the America article provided (1) it isn't so large that it overpowers the text; and (2)The picture is quality, not a bad photoshop or worse, an MS Paint product. A feature article deserves a good image. 
See Uncyclopedia:How To Be Funny And Not Just Stupid THE IMAGE VERSION for more details. -- Simsilikesims(♀UN) Talk here. 23:12, June 5, 2013 (UTC) - The featured version of the article (2008) began, "America is the name of the world's largest fast food restaurant." Someone later changed it to "largest badass country." This could be fanboyism and is unencyclopedic, but there was some fun going on here, as the first example of baddassness was Bedford-Stuyvesant, a district no one feels is bad-ass. Your opinion above--or, in the Change Summary, It should instead be offensive to Americans--is absurd. There is no orthodoxy on whether an article should be flattering or unflattering. It should be encyclopedic and funny--and a dozen Uncyclopedians in 2008 voted that it was. In fact, an unflattering article is less likely to amuse its most typical reader unless it insults with delicacy and skill. Spıke Ѧ 23:17 5-Jun-13 Wait, I got an idea... I have created a page in my user namespace, User:Saddex/America, in order to bring the infobox back to life, and just construct it for fun. Then I got an idea. Why not try to fill it with so much funny things as possible, lock it, and then mirror it to the America page? (Saddex (talk) 23:55, June 5, 2013 (UTC)) - Sure, you can have fun with it in your userspace, and edit the userspace version as much as you want as long as you don't mess up the mainspace version. -- Simsilikesims(♀UN) Talk here. 23:57, June 5, 2013 (UTC) - In fact, that is a great idea, especially if your point-of-view is different from the established article; and even a Featured Article can have a template pointing to "alternate version" or "adversarial version" and vice versa, plus disambiguation pages or whatever it takes. Spıke Ѧ 00:00 6-Jun-13 - If you, SPIKE, thought of a link to an alternate version, then it wasn't exactly that I meant. I wondered if we could fill the infobox in the userspace page with so much funny stuff as possible, then lock it when we are satisfied, and then mirror it to America. That would protect from anons (with bad skills), spammers, fanboys and so on. (Saddex (talk) 00:06, June 6, 2013 (UTC)) - I see. A page-specific Infobox template could receive separate protection and does solve the problem of Anons extending it to infinity with crap. But giving protection to a piece of a page would be unusual. What I thought you meant solves the separate problem of you having a different comedy concept from the creators of the featured article. Spıke Ѧ 00:18 6-Jun-13 - Hmm, so as I was understanding what you meant, you were changing what you meant! Separately, a map that works in my Humble Opinion is at Isle of Man. Spıke Ѧ 00:29 6-Jun-13 - What do you think about the infobox now? User:Saddex/America (Saddex (talk) 00:42, June 6, 2013 (UTC)) - I don't like the religion part - Islam is actually one of our religions, just not the top religion (which would probably be Protestantism, with Catholicism a close second). Dollarism, or maybe just capitalism, has some truth to it, and would be funnier. -- Simsilikesims(♀UN) Talk here. 00:51, June 6, 2013 (UTC) - Chuck Norris and non-huffable kitten are meme-cruft. Get rid; you are not practicing comedy but imitation with them; they have nothing to do with America nor, any more, with Uncyclopedia. "Emperor" George W. Bush has been out of power and discredited for five years and obviously has nothing to do with anything, unless you are about taking sides. 
Putting either Obama or a Republican in the Infobox will date the article and make the reader wonder where you're coming from. "Ethnic groups"? Get rid! America as a melting-pot is well-documented; if you have something new to say about this, apart from specific stereotypes on specific groups such as Mexicans, you haven't done so and it must be especially clever. "Largest city: Guantanamo Bay"? You know that's not true, and it's not funny unless you are getting into right-versus-left politics, and even then just stating the name isn't funny unless you have a point to make, and doing so will alienate half your readers unless you are unusually smooth at it. The funniest thing about Gitmo, unless you are an expert on war policy, is that Obama said he'd shut it and didn't, and you can't do anything with that in an Infobox. "Religion: Islam" is likewise not true and as we say, "Untrue ≠ Funny." You are trafficking in stereotypes, perhaps convinced that stating one is all you have to do, or perhaps that it doesn't matter provide you insult the American reader. Please step back and plan a comedy concept for the entire page: A misimpression of America that all your humor will feed. Spıke Ѧ 01:01 6-Jun-13 Listcruft Thank you for intervening with BetterSkatez2012. I haven't studied what you changed at Listcruft, but if it is motivated by his listcruft, then you should also visit HTBFANJS (Sec.8, Avoid Lists (nearly all the time)), a section I created recently to hold and unify advice mostly given in other places. Spıke Ѧ 23:47 5-Jun-13 - Listcruft is an article created to illustrate listcruft, and I decided to add a bit more to the lists there, which furthers the purpose of the article, and possibly updates it slightly. I already learned the hard way to avoid lists - one of the articles I created as a beginner a couple years ago was voted down on VFD - 100 Worst Ice Cream Flavors. Each piece of the list had at least one sentence to go with it too, but that wasn't enough to keep it from being killed as listcruft. I have mostly avoided listcruft ever since. -- Simsilikesims(♀UN) Talk here. 23:54, June 5, 2013 (UTC) OK; all I'm saying is that, if you were after a clearer policy statement, you should make it where authors expect to see policy statements, at HTBFANJS. My only casualty was Peruvian slang, an unabashed attempt to explain some of the inside jokes at Peru--that is, to educate not amuse. Spıke Ѧ 00:03 6-Jun-13 What do you think about this image? (Saddex (talk) 22:01, June 6, 2013 (UTC)) - I think it would work for an alternative version of the America page, especially for the infobox, but I'd rather not see it on the featured version. -- Simsilikesims(♀UN) Talk here. 22:07, June 6, 2013 (UTC) - User:Saddex/America - What do you think about it right now? :P (Saddex (talk) 23:52, June 7, 2013 (UTC)) 2 new questions from, you guessed it, DSA510! Er, I have 2 questions (no I'm not going to do a you have 2 cows joke), so, here they are, in glorious 720p, 1080p 4K definition!: - Can I make a welcome template of my own to use for new users? - Can I make a little comment symbol (to go along with the for, against, and abstain symbols? I'm fine if the answer to either question is NYET, and take your time to answer this, I'm in no hurry, yet. --The Slayer of Zaramoth DungeonSiegeAddict510 15:54, June 7, 2013 (UTC) - I said I can wait... but I can't wait forever! --The Shield of Azunai DSA510My Edits! 
04:50, June 11, 2013 (UTC) (Newsig, btw) - Yes, you can make a welcome template of your own to use for new users, but make sure it contains all the links to the same pages that the current welcome template links to (Beginner's Guide and by extension HTBFANJS for instance) and make sure it is polite to new users. You can also make comment symbols to go along with the for against and abstain symbols, but make sure it has the same function as a for symbol, or an against symbol. Making a new abstain symbol might be too confusing. Make sure your symbol isn't too big (it should be the same size as the for against and abstain symbols), and make sure that the template is formatted correctly, test it well before using. Same with your custom welcome template. -- Simsilikesims(♀UN) Talk here. 05:28, June 11, 2013 (UTC) Yomamen New user Yomamen has created a user page which is either (1) a resumé or (2) subtle humor or (3) loads of laughs, but most found on someone's Facebook page. Denza and I debate it on User talk:Yomamen. Would you lend a third pair of eyes? Spıke Ѧ 21:16 7-Jun-13 Godaddy12121212 Thanks for your review of the last one; now another one for you. Even newer user Godaddy12121212: Who is he, and why is he developing template after template, seemingly to help us manage the QVFD process? Spıke Ѧ 21:54 7-Jun-13 As he's actually a user, I'm unsure whether to add him to QVFD or try to move stuff into his user-space. I would say QVFD, as the titles seem to be somewhat asking for it, but what do you two think?Sir Reverend P. Pennyfeather (fancy a chat?) CUN VFH PLS 22:13, June 7, 2013 (UTC) - Sorry, just realised what he's doing. Do we really need the template? It seems to be like VFD but without any element of democracy. Personally, I think the current system seems fine. Sir Reverend P. Pennyfeather (fancy a chat?) CUN VFH PLS 22:21, June 7, 2013 (UTC) - It would just be sort of in-between the two deletion mechanisms. In most QVFD cases, the author's input should be discouraged, as they'd probably just rant before their inevitable ban. My vote, for what it matters, is to politely tell this new user that we prefer our own, well-established and functional mechanisms to an untried, impractical one. Sir Reverend P. Pennyfeather (fancy a chat?) CUN VFH PLS 22:28, June 7, 2013 (UTC) - You can tell me if I was sufficiently polite on his talk page. I appreciate your comments. Wikipedia is bigger and slower than we are, and our process is biased toward deletion (and retrieval from the mag-tapes after-the-fact if the vandal bastard should register and ask for a copy in his userspace, which is always granted unless it's something like cyberbullying). Spıke Ѧ 23:23 7-Jun-13 - I think you've handled it just fine. I'm sure it was well meant, but it certainly is an odd thing to do, going around wikis adding unsolicited deletion templates. Sir Reverend P. Pennyfeather (fancy a chat?) CUN VFH PLS 08:43, June 8, 2013 (UTC) The IP vandal is using a bot or something, I can't revert everything. Please help. --The Slayer of Zaramoth DungeonSiegeAddict510 04:37, June 10, 2013 (UTC) - Crisis has been averted --The Shield of Azunai DSA510My Edits! 18:19, June 10, 2013 (UTC) Team masturbation This first effort of a new Uncyclopedian is not predictably awful. But it is awful. And especially, some of the red links worry me that it is the first in a planned "story arc." What, if anything, do you reckon should be done with it? 
Spıke Ѧ 23:30 10-Jun-13 - Eh, I have been looking at that article for a while, and I have been tempted to put it in the users userspace... but I wasn't sure what to do. I think we should slap an ICU tag on it and wait. --The Shield of Azunai DSA510My Edits! 23:42, June 10, 2013 (UTC) - I agree with DungeonSiegeAddict510 here, so I put the ICU tag on it, and we'll see if it gets funnier and less meme-y. -- Simsilikesims(♀UN) Talk here. 04:33, June 11, 2013 (UTC) Adventure Quest With my blessing (at Talk:Adventure Quest), Uncycloperson nominated this on VFD, then undid his nomination, perhaps because of a typo. If he wants to go through with it, he should be allowed to despite the tag you placed on the article. Spıke Ѧ 20:45 11-Jun-13 Denza252 DungeonSiegeAddict510 uploaded a drawing of his with a scolding to other Uncyclopedians not to use it. I deleted the upload, and banned him for 2 hours for scolding us over a nonexistent rule. He has just apologized on my talk page as Denza252--it seems that he was gaming us with the time-consuming hoax that this account was permanently hacked and unusable. I have permabanned the Denza252 account and am ready to permaban DSA510 for ban evasion unless you have a much better idea. Spıke Ѧ 23:06 13-Jun-13 - I concur on permabanning the Denza252 account; it is either hacked and not being used by the original user, or else it is being used as a sock by DSA510. However, I would not go so far as permabanning DSA510 for ban evasion; I would recommend a couple weeks to a month, as an extension of the latest ban. Remember, Puppy on the Radio did ban evasion some time ago with his sockpuppet, but was banned for three months, not ultimately permabanned. Also, this ban evasion is not as serious as that incident with POTR since DSA510 is not attempting to evade the rules of a competition or rig any votes. I have just extended his ban myself. -- Simsilikesims(♀UN) Talk here. 23:13, June 13, 2013 (UTC) (Although he did cast one vote on VFD with the Denza252 account, which I reverted as at least voting-while-banned.) It is frustrating as he has assembled a nice record of good patrolling, which somehow never gets beyond the recurring need to call attention to himself with rulebreaking. I'll go with the lower of your range, as I never thought he would do good 'anything' in the first place. (Edit-conflicted and now moot.) If you think there is real ambiguity whether this is ban evasion, we should find out for sure. Spıke Ѧ 23:25 13-Jun-13 - I'm sorry to evade my ban like this but I DungeonSeigeAddict510 is not the real Denza. I am the real Denza and my account is Denza252. If you could PLEASE unban Denza252 and place his ban on the imposter I will switch back to Denza252 and I will never use this account again. Sorry to get you caught up in this Denza deboccle. DungeonMaster494 (talk) 23:29, June 13, 2013 (UTC) (Edit-conflicted and superseded) You are going to love the post to my talk page just now. We cannot sort this out without checkusers. Spıke Ѧ 23:31 13-Jun-13 - I agree: We should run checkusers on Denza252, DungeonMaster494, and DungeonSeigeAddict510. I do think that this is getting too complicated and ambiguous. ---- Simsilikesims(♀UN) Talk here. 23:35, June 13, 2013 (UTC) Okay, for what it's worth, which I assume isn't all that much, the DungeonSiegeAddict510 account, from my experience, is the real person here. 
A few months ago a bunch of IRC users thought it would be funny to completely fuck around with Denza and I'm personally surprised at his determination to stick through it after the kind of abuse they've forced him to go through. I speak with him daily on the darthipedia IRC (as he's been put on auto-kick from the uncyclopedia IRC by one of said jokesters from before), and it's pretty easy to tell that the Denza I'm talking to every day is the socially-awkward and self-promoting good patroller that we've come to know here and on the fork as Denza. As far as I know and he has told me, DungeonSiegeAddict510 is the only account that he uses. I was present during the time when his account was "hacked." He was actually fooled into giving up his password to another user for "analytical purposes" or something like that, which certainly doesn't do great testament to his logical reasoning abilities, but certainly the users tormenting him saw an opportunity and took it. He's active on the fork and a good contributor. Has even attempted some writing a few times. And there he's active under the same account DungeonSiegeAddict510. Naturally I'm here because he told me about this situation on IRC, which he always does, about everything. On both wikis. It's really quite maddening. But he's not a malicious person. He just wanted to become a member of this community and he still does, and he's been punished for that by enterprising trolls who've allowed this situation to carry on a lot longer than their ages would bely of their maturity. You can do what you want with him as like I've said and told Denza, I don't know how much weight my input holds, but this is what I know to be the truth and I don't like to see an honest contributor ram-rodded this way. So I'd ask that you cut the kid a break, although certainly do the checkusers and such. Thanks. -RAHB 00:16, June 14, 2013 (UTC) - For what it's worth: - My sockpuppetry wasn't for the purposes of competition/vote rigging - I actually scored my sock deliberately lower in a competition, and avoided any vote rigging. There was a bit more behind the ban for me at that stage though. I actually was permabanned for socking. The ban was modified due to me being long term member with generally good history blah blah. - Funnybony also had (has) a sock account that - if I remember rightly - belonged to his partner, but ended up double voting at one stage. He was asked to not do it again when it was discovered. That sock is still unbanned, but rarely active. - MrN has a sock he uses regularly, as do other regular users. Some are more obvious socks than others. - My major issue with socking is either vote rigging or drama inciting. I don't know of any vote rigging in this instance - being only here irregularly at the moment - but there's definite drama. I've never relied on IRC to be an effective communication channel - the lack of accountability is an issue - but a check user is probably the best action going forward. Even if there was an issue with an imposter sockpuppet (which is just an odd concept), I'd want to have all the information I could possibly have before making a decision on what to do. Especially as it appears we have a determined noob with some writing potential here. • Puppy's talk page • 12:37 14 Jun 2013 - I should also mention User:PortuguseOttersTryRadios while I'm at it. • Puppy's talk page • 12:43 14 Jun 2013 Puppy, my friend, dragging us through your personal criminal record is a distraction. 
I am willing to believe that pranksters at the Fork (perhaps the same ones who wound Cat up to argue that this website is Auschwitz because I ban everyone and it's scary) saw me ban DSA510 and pounced. One of them I have permabanned as he cannot spell "Siege" whereas DSA510 sees the word all the time when he is not masturbating. I am convinced there is ambiguity on the ban evasion issue, though checkuser is a good idea. Spike sockpuppet (talk) 00:58, June 14, 2013 (UTC) - I have unbanned DungeonSeigeAddict510 pending a checkuser, because it is apparent that the ban evasion may not have been done by the same person. Should the checkuser reveal that there was ban evasion, the ban will be reinstated with interest. -- Simsilikesims(♀UN) Talk here. 01:03, June 14, 2013 (UTC) Correct on both counts. Meanwhile, we should turn the evening's gigantic diversion from writing funny stuff into an inside joke by adding to the Worst 100 Events of 2013 that I banned #494 merely for inability to spell Siege. Simsie, your own inability to spell Siege we will discuss later. Spıke Ѧ 01:16 14-Jun-13 Not to keep harping on this, but Denza says his IP is still banned, probably from that #<insertnumberhere> thing that happens when people get banned. -RAHB 01:50, June 14, 2013 (UTC) Now its my turn to talk. First of all, I would never use sockpuppets to apologize or edit during a ban, as I learned from my previous experience of editing while banned.(It got my school's IP banned) Second of all, I have said quite a few times, and will mark it on my userpage after this, that I only use 2 IPs, save the occasional edit during a trip to somewhere, which I may or may not have done. (I honestly can't remember if I edited uncyc during vacation to somewhere or not) I don't use proxies, ever. It goes against my code. Another detail about my usernames, is that they have the gamertag effect, or some digits slapped on the back. Now, I only use 4 main number codes, 252, 242, 510, and 525. If anyone really wants to dig superdeep on this issue, there are a few accounts that will back this up. First of all, my IRC account is TheFakeazneD525, after my original nick Denza252 got stolen. My email address has 242 on it. My armorgames account, is DarkeSpyne242 (with the profile pic that looks like a female ninja.) I don't keep many accounts, save the email, this AG, and IRC, and nothing else (save a defective TvTropes account.) Yet another topic I will say, to prove myself, is that I never write in the style that the person claiming to be Denza252 does, for example, in VFD, he was like "I agree with SPIKE 100%" or something. I never write like that. I would say Good X, interesting y, and possibly a Per above, along with some of my own Denza-esque humor. Now, the fact that this account is technically a sockpuppet is a rare exception to my code. I did it, as there was no other way to get back on Uncyclopedia. Otherwise, I would never create a sockpuppet. Also, the assumption that this was all a hoax and that I really did have access to the account Denza252 is actually a bit hurtful. I would never do such a thing, and the things that I had to do after that were long, and tiresome. Regarding the image, I may have been a bit too paranoid about it. I now feel that I should have said something along the lines of "You may use it, but do not abuse it" or something like that instead of "Do not use without my consent." I had worked on that particular sketch for a while, and I sometimes get a bit too defensive of things that I have made. 
I will re-upload the image with a revised description. And also, I have been monitoring the recent changes this whole time, and I saw that SPIKE added a bit about banning a user for misspelling "Siege" (among other things, like account impersonation, and sockpuppeting, and hacking my account, I presume) I feel that there should be a small bit about the whole "Denza Saga" as it is now named in that list, as it is quite empty as of right now, and a few events wouldn't hurt. After all, they are the WORST reflections on 2013. Anyways, if I have missed anything, you can contact me on my talk page. --The Shield of Azunai DSA510My Edits! 02:53, June 14, 2013 (UTC) - Jeez! you should run for President. Because all of your actions are justified except (1) the ones you can't remember, (2) the ones you simply had to take to return to Uncyclopedia during a ban, and (3) a few that in retrospect you might have done differently. And all the others are exceptions. And you might have a psychosis. Unlike all the other Admins you have bothered on both sides, I do not care whether this particular dog elected to have fleas (the trolls that follow you around); I simply do not want the dog and his fleas in the house. So I disagree that "its [your] turn to talk." "Its" your turn to do work that advances this website. PS--Am modifying your message to remove your e-mail address to make it harder for robot spammers. Spıke Ѧ 12:07 14-Jun-13 - Agreed with SPIKE on there. You've been given several extra chances and done the extra campaigning so to speak of being able to even get into the house despite the fleas, now make it worth something. And also don't make me embarrassed of my glowing endorsement from earlier. "It's my turn to talk." Jeez, man, you'd think he'd get the picture by now >_< -RAHB 19:36, June 14, 2013 (UTC) Gymleader Melchizedek New user Gymleader Melchizedek seems not to want to help us produce funny pages, so much as to leave a personal mark at the top of a lot of them, always involving initial quotations, none of which are horrible but neither invite the reader into the page. A second opinion, please? Spıke Ѧ 15:38 14-Jun-13 - I reverted the quote left at Feminism and reviewed the video. The video seems ok, and the uploading of it may have been yet another bug. I also left a note on the user's talkpage below your note. Leaving quotes on pages is a typical n00b mistake. -- Simsilikesims(♀UN) Talk here. 21:14, June 14, 2013 (UTC) Summer's here! And so is the post Telling You Stuff You Already Knew, But With Different Words! June, 14th 2013 • Issue 186 • This newspaper may not be able to tell you who Denza but then it can't tell you much anyway! He must think highly of it; there are two signatures! Like an edition of a newspaper with * * in the masthead. Spıke Ѧ 13:34 16-Jun-13 IFYMB! I received a message from this user about lifting a three month ban and an apology for past actions. I decided to lift it but I suggest I will put him on probation for the last two months of his sentence. I will add this to UN:OFFICE to make it clear for future:35, June 16, 2013 (UTC) - I don't remember the issues and haven't yet looked them up, but I object. If the ban was warranted, then cancelling it on appeal of the bannee--an appeal made during an act of ban evasion--is improper. I understand that the alternative is to shop for a willing Admin on the other side and use him as a proxy, and that's wrong too. 
(And cheers that RAHB limited his recent intervention to giving us helpful background information we didn't have, above.) This recurring issue is my only policy disagreement with youse in four months, and I don't claim that you should switch to my style; only ask yourselves why we can't have things (rules, bans, etc.) mean what they say. Spıke Ѧ 13:34 16-Jun-13 - Ok I understand the objections for lifting the ban on IFYMB! It was a stupid thing for him to have done on your user pages but he has apologised. I also apologise to both you and Sims for not asking you first. Hope we can close this issue and then judge the user on their subsequent actions post-ban.:32, June 16, 2013 (UTC) - What he did was blatant vandalism, and I banned him before he could extend it to the rest of the wiki. It was reminiscent of what a vandal had done not long before on the fork. He has a record of positive contributions before the fork happened, so the UN:OFFICE rule is a good idea, but I thought I recalled his vandalizing more than just a couple user pages. I will have to check his contributions to be sure. I have cancelled a ban before on apology and appeal of a bannee before, so I can understand your decision to do the same, Romartus. But this should not be done automatically, they have to be credible in their apology. As to rules, we have three of them, plus a number of "ignorable policies". All of the ignorable policies should be referenced to in the Beginner's Guide. -- Simsilikesims(♀UN) Talk here. 01:39, June 17, 2013 (UTC) - Thank you Sims. Acknowledged. --, June 17, 2013 (UTC) ZhenBang I gave feedback to new user ZhenBang but he bulled forward with uniformly vulgar edits, culminating in the awful UnNews:The IRS shall take your cash by force!, which would be "mercy-moved" immediately if I were still Editor-in-Chief. Would you care to try with him? Spıke Ѧ 23:19 18-Jun-13 - I just left him some feedback on his UnNews on his talk page. -- Simsilikesims(♀UN) Talk here. 08:21, June 19, 2013 (UTC) Checking InHey Sims, I sent you an email a few days back. Did you get it? Just wanted to make sure I sent it to the write email. (I used what you have stored with Wikia). Let me know, thanks! --Sarah Manley (talk) 21:34, June 19, 2013 (UTC)
http://uncyclopedia.wikia.com/wiki/User_talk:Simsilikesims?oldid=5708064
CC-MAIN-2015-32
en
refinedweb
How to call BPEL from java Client code?
843830 Feb 22, 2008 3:44 AM
I have created one BPEL in NetBeans 6.1 and want to pass parameters from a Java client and get back the result. If anyone knows how to invoke BPEL from Java client code, do reply soon with Java sample code?
Edited by: Avi007 on Feb 22, 2008 1:43 AM
Edited by: Avi007 on Feb 22, 2008 1:43 AM
This content has been marked as final. Show 8 replies

1. Re: How to call BPEL from java Client code?
843830 Feb 22, 2008 12:35 PM (in response to 843830)
This is no different from calling any webservice from Java code. Your BPEL implemented the service defined in the WSDL. I am assuming you are aware of service assembly and JBI. Use the composite application project type to create the service assembly and deploy the project to the appserver. There are lots of examples/blueprints/videos on doing this stuff. Your WSDL should have some binding section, let's say http. Now from your Java client call this webservice using JAX-WS or any other mechanism. -Kiran B

2. Re: How to call BPEL from java Client code?
843830 Feb 22, 2008 12:52 PM (in response to 843830)
Using NetBeans you can create a Java application project. Right click on the project and select New Web Service Client. This will open up a dialog box; select the WSDL for the BPEL process. This will generate the necessary classes that allow you to create the message and call the web service. I don't have handy examples, but this will get you started.

3. Re: How to call BPEL from java Client code?
843830 Mar 30, 2008 12:28 AM (in response to 843830)
I attach a piece of code that I prepared from an example in NetBeans (SynchronousSample):

WSDL code:

<?xml version="1.0" encoding="UTF-8"?>
<definitions name="SynchronousSample" targetNamespace="" xmlns="" xmlns:
    <types>
        <xsd:schema
            <xsd:import
        </xsd:schema>
    </types>
    <message name="SynchronousSampleOperationRequest">
        <part name="a" type="xsd:int"/>
        <part name="b" type="xsd:int"/>
    </message>
    <message name="SynchronousSampleOperationReply">
        <part name="r" type="xsd:int"/>
    </message>
    <portType name="SynchronousSamplePortType">
        <operation name="SynchronousSampleOperation">
            <input name="input1" message="tns:SynchronousSampleOperationRequest"/>
            <output name="output1" message="tns:SynchronousSampleOperationReply"/>
        </operation>
    </portType>
    <binding name="SynchronousSampleBinding" type="tns:SynchronousSamplePortType">
        <soap:binding
        <operation name="SynchronousSampleOperation">
            <soap:operation/>
            <input name="input1">
                <soap:body
            </input>
            <output name="output1">
                <soap:body
            </output>
        </operation>
    </binding>
    <service name="SynchronousSampleService">
        <port name="SynchronousSamplePort" binding="tns:SynchronousSampleBinding">
            <soap:address
        </port>
    </service>
    <plnk:partnerLinkType
        <!-- A partner link type is automatically generated when a new port type is added. Partner link types are used by BPEL processes. In a BPEL process, a partner link represents the interaction between the BPEL process and a partner service. Each partner link is associated with a partner link type. A partner link type characterizes the conversational relationship between two services. The partner link type can have one or two roles.-->
        <plnk:role
    </plnk:partnerLinkType>
</definitions>

Java client code with Axis:

package esbclientapplication;

import javax.xml.namespace.QName;
import javax.xml.rpc.ParameterMode;
import org.apache.axis.Constants;
import org.apache.axis.client.Call;
import org.apache.axis.client.Service;
import org.apache.axis.constants.Style;

/**
 *
 * @author Julian David Rojas Jimenez (juliandrj gmail com)
 */
public class Main {

    final static String NAMESPACE = "";

    /**
     * @param args the command line arguments
     */
    public static void main(String[] args) {
        try {
            Service service = new Service();
            Call call = (Call) service.createCall();
            call.setTargetEndpointAddress("");
            call.setOperationStyle(Style.RPC);
            call.setOperationName(new QName(NAMESPACE, "SynchronousSampleOperation"));
            call.addParameter("a", Constants.XSD_INT, ParameterMode.IN);
            call.addParameter("b", Constants.XSD_INT, ParameterMode.IN);
            call.addParameter("r", Constants.XSD_INT, ParameterMode.OUT);
            call.setReturnType(Constants.XSD_ANY);
            String result = call.invoke(new Object[]{"1000", "123456"}).toString();
            System.out.println(result);
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }
}

4. Re: How to call BPEL from java Client code?
843830 Jul 1, 2008 10:46 AM (in response to 843830)
Kiran: There are many blueprints, exercises and examples, but they are very specific and do not directly address other kinds of development. For example, I created an asynchronous BPEL process, with correlation, that connects to web services. My problem occurs when creating a client for the BPEL web service; with ordinary web service clients I have no problem building them. I have spent a lot of time trying to find out what happens, looking at blueprints, examples, etc., but I fail to fix the error, which is as follows:

[WARNING] ignoring operation "AsignacionOcrOperation": message part does not refer to a schema type declaration [line 21]
Port "AsignacionOcrPortTypeBindingPort" does not contain any usable operations [line 46]
Service "AsignacionOcrService" does not contain any usable ports. try running wsimport with -extension switch. [line 45]
generating code...

Kiran, any idea you could give would be fabulous; if you need the sources, I would have problems sending the draft BPEL code. ... Thanks in advance.

5. Re: How to call BPEL from java Client code?
843830 Aug 18, 2008 4:04 AM (in response to 843830)
Is it a working example/code - anybody tested it?

6. Re: How to call BPEL from java Client code?
843830 Feb 19, 2009 9:23 AM (in response to 843830)
Hi, it doesn't work for me since the client can't find the endpoint of the process. I test BPEL with tests in Composite App, but can't call it as a regular Web Service. It seems that the address is different from the Web Service deployment. Usually, the address looks It doesn't help if I try Probably the port or address generation is different for JBI modules. Any ideas about the right address?

7. Re: How to call BPEL from java Client code?
843830 Feb 20, 2009 4:39 AM (in response to 843830)
I found it at last >:-| The right address is This differs a lot from the addresses for regular Web Services. I wonder why. Andrey

8. Re: How to call BPEL from java Client code?
843830 Apr 5, 2010 2:07 AM (in response to 843830)
Hi, I need urgent help on this. I tried writing the client the same way and every time I am getting the following exception - org.apache.axis.message.SOAPFaultBuilder.createFault

The soap body in brief ----

<binding name="HelloWorldBinding" type="tns:HelloWorld">
    <soap:binding
    <operation name="process">
        <soap:operation
        <input>
            <soap:body
        </input>
        <output>
            <soap:body
        </output>
    </operation>
</binding>

I suspect I am making some mistake in call.addParameter() or call.setReturnType() or call.invoke(). I am trying to use the same client as mentioned, setting all axis / soap / mail / activation related jars in the classpath.

Code snapshot -----

call.setTargetEndpointAddress("");
call.setOperationStyle(Style.DOCUMENT);
call.setOperationName(new QName(NAMESPACE, "process"));
call.addParameter("a", Constants.XSD_STRING, ParameterMode.IN);
call.addParameter("r", Constants.XSD_INT, ParameterMode.OUT);
call.setReturnType(org.apache.axis.Constants.XSD_STRING);
String result = call.invoke( new Object[] { "a"}).toString();

your urgent help is much appreciated.... Tx, Joy...
https://community.oracle.com/message/6646765
CC-MAIN-2015-32
en
refinedweb
ioctl(2)

NAME
ioctl - control device

SYNOPSIS
#include <unistd.h>
#include <stropts.h>

int ioctl(int fildes, int request, /* arg */ ...);

DESCRIPTION
The ioctl() function performs a variety of control functions on devices and streams. For non-streams files, the functions performed by this call are device-specific control functions. The request argument and an optional third argument with varying type are passed to the file designated by fildes and are interpreted by the device driver. For streams files, specific functions are performed by the ioctl() function as described in streamio(7I).

The fildes argument is an open file descriptor that refers to a device. The request argument selects the control function to be performed and depends on the device being addressed. The arg argument represents a third argument that has additional information that is needed by this specific device to perform the requested function. The data type of arg depends upon the particular control request, but it is either an int or a pointer to a device-specific data structure.

In addition to device-specific and streams functions, generic functions are provided by more than one device driver (for example, the general terminal interface; see termio(7I)).

RETURN VALUES
Upon successful completion, the value returned depends upon the device control function, but must be a non-negative integer. Otherwise, -1 is returned and errno is set to indicate the error.

ERRORS
The ioctl() function will fail for any type of file if:

EBADF - The fildes argument is not a valid open file descriptor.
EINTR - A signal was caught during the execution of the ioctl() function.
EINVAL - The stream or multiplexer referenced by fildes is linked (directly or indirectly) downstream from a multiplexer.

The ioctl() function will also fail if the device driver detects an error. In this case, the error is passed through ioctl() without change to the caller. A particular driver might not have all of the following error cases. Under the following conditions, requests to device drivers may fail and set errno to indicate the error:

EFAULT - The request argument requires a data transfer to or from a buffer pointed to by arg, but arg points to an illegal address.
EINVAL - The request or arg argument is not valid for this device.
EIO - Some physical I/O error has occurred.
ENOLINK - The fildes argument is on a remote machine and the link to that machine is no longer active.
ENOTTY - The fildes argument is not associated with a streams device that accepts control functions.
ENXIO - The request and arg arguments are valid for this device driver, but the service requested can not be performed on this particular subdevice.
ENODEV - The fildes argument refers to a valid streams device, but the corresponding device driver does not support the ioctl() function.

Streams errors are described in streamio(7I).

ATTRIBUTES
See attributes(5) for descriptions of the following attributes:

SEE ALSO
attributes(5), standards(5), streamio(7I), termio(7I)
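As a usage sketch of the calling convention above: the example below assumes the generic terminal request TIOCGWINSZ and struct winsize, whose defining header varies by platform (<sys/termios.h> on Solaris, <sys/ioctl.h> on many other systems), so the includes may need adjusting.

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>   /* assumed to declare ioctl(), TIOCGWINSZ and struct winsize */

int main(void)
{
    struct winsize ws;

    /* The third argument is request-specific: TIOCGWINSZ takes a pointer
       to a struct winsize that the terminal driver fills in. */
    if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == -1) {
        perror("ioctl(TIOCGWINSZ)");   /* e.g. ENOTTY if stdin is not a terminal */
        return 1;
    }

    printf("terminal is %u rows by %u columns\n",
           (unsigned)ws.ws_row, (unsigned)ws.ws_col);
    return 0;
}

Run with standard input connected to a terminal; redirecting stdin from a regular file exercises the ENOTTY error path described under ERRORS.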
http://docs.oracle.com/cd/E26502_01/html/E29032/ioctl-2.html
CC-MAIN-2015-32
en
refinedweb
You can subscribe to this list here. Showing 1 results of 1 David, > > That is a good point. It is true that today overflow notification > > is requested via the PMC and not the PMD. The implementation assumes > > (wrongly) that PMCx corresponds to PMDx. The flag is recorded in > > the PMD related structure. Hence, it would seem more natural to > > pass the flags for RANDOM/OVFL_NOTIFY via PFM_WRITE_PMDS. I did it > > via PFM_WRITE_PMCS because I considered those flags as part of the > > configuration of the counters, hence they would go with PMC. For the > > PPC64, it looks like you are in a situation similar to P4, where > > multiple config registers are used to control a counter. We could > > move the flags to PFM_WRITE_PMDS. > > I think that would make more sense. Or maybe even a different > mechanism entirely. How would you support performance monitor event > that aren't counter overflows, for those CPUs that have such? >. I also assume that the PMC selection determines the PMD. Certain events can be measured on any PMC register. No matter what, I think a tool would need to find out the association PMC -> PMD. This can be provided by a user level library. I think your proposal makes sense. The Itanium PMU model is very clean in that regards, making work fairly simple for tools. I guess I will have to revise this in my tools/libraries.). > > > PFM_WRITE_PMCS > > > > > > The documentation says that the PFM_MAX_PMD_BITVECTOR can vary between > > > PMU models. But what the value of this is for the current PMU model > > > is not exported anywhere. Varying by architecture doesn't make much > > > sense, since PMU model details vary only mildly more between > > > architectures than they do within CPU models of one architecture. > > > > > The PFM_MAX_PMD_BITVECTOR is exported in the perfmon.h header file. > > At this point, it is provided by each architecture. When the > > processor architecture is nice, then the PMU framework is specified > > there and it makes the job of software easier. For instance, on Itanium, > > the architecture says you can have up to 256 PMC and 256 PMD registers. > > Having this kind of information is very useful to size data structures > > approprietely. You don't want to have to copy large data structures > > (think copy_user) if you don't have to. Are you advocating that this > > be a PMU model specific size? > > Having it per-architecture doesn't really make a lot of sense, since > PM units vary only slightly less between CPUs of the same architecture > than they do between CPUs of different architectures. The PM unit may Well, I would cite Pentium III/Pentium M vs. P4/Xeon, that's is quite a drastic change. Yet this is inside the same processor family. > well not be defined by the architecture specification (if such exists) > at all, so I don't think you can count on there being a definitive > limit on the number of PMDs in general. On Itanium there is an architected limit. That is quite nice. I don't think having a per PMU-model limit is manageable. It would be hard to manage all the variations for the data structures. How would you handle the X86 family that way: one size for PIII, one size for P4. Yet the kernel support files would probably be the same, in fact the same kernel boots on both. > > The greatest number of PMDs on any PowerPC so far is 8, and I'm not > aware of any plans for CPUs with more, but it wouldn't surprise me if > it happened some day. 
Since this size can never be changed, without > breaking the ABI, we would have to leave room for expansion, and > there's no real guidance as to how much. > > So I think this should either be PM model dependent, or it should be > truly global - per-architecture is a bad compromise. The latter, > obviously, is much simpler to implement. Another thing to take into account is that you may want to use virtual PMDs to access software resources. For instance, take perfctr, there is TSC (timestamp) that could be mapped to a logical PMD. That way, it would be easy to specify it as part of the registers/resource to record in a sample (via the smpl_pmds[] mask). On Itanium, the debug registers are used by the PMU to restrict the code/data range where to monitor. In the old interface I had a specific call to program the IBR/DBR. In the revised document, you will see that I have logical PMC to do that. It simplifies the code and makes the interace more uniform, after all in this situation the debug registers are really use the configure the PMU. You can imagine mapping some kernel resources to PMD, such as amount of free memory, the PID of the current process. To make this work, it is really nice to have an upper bound for the physical PMU registers. Then you can add on top. That's what I did for Itanium.. Picking a single value would be good. Let's say you pick 256 for max physical PMC and max physical PMD. Assuming there are no big holes in the namespace. I think that is a pretty safe limit. Then logical PMD/PMC could be added above that. Of course if all really have is a set of 4 PMC, then you pay the copy cost for larger than needed data structures. But, the ABI would be preserved when the number of registers grow. > > > PFM_LOAD_CONTEXT > > > > > > I'm not sure I see the point of the load_set argument. What > > > can be accomplished with this that can't be with appropriate use of > > > PFM_START_SET? > > > > > You don't want to merge START and load. that is not because you attach > > to a thread/CPU that you want monitoring to start right away. > > But I think you have a good point. the interface guarantees that on > > PFM_LOAD_CONTEXT, monitoring is stopped. You need explicit START to > > activate. This is true even if you detached while monitoring was active. > > I need to check to see if there is something else involved here. > > Sorry, I don't fully follow what you're saying here (I can't parse the > first sentence, in particular). My point is it's not clear to me that > there's anything useful you can accomplish with: > CREATE <do stuff> LOAD START > that can't be done with > CREATE+ATTACH <do stuff> START > It depends on what you do in <stuff>. I assume that is where you program the PMU with PFM_WRITE_PMDS/PFM_WRITE_PMCS. I think you will see that the first model makes sense if you want to support batching: for(i=0; i < N ; i++) { c = CREATE PFM_WRITE_PMCS(c); PFM_WRITE_PMDS(c); } forearch(c) { ATTACH to target thread PFM_start. > > > PFM_CREATE_EVTSET / PFM_DELETE_EVTSET / PFM_CHANGE_EVSET > > > > > > Is there really a need to incrementally update the event sets? Would > > > a PFM_SETUP_EVTSETS which acts like PFM_CREATE_EVTSET, but replaces > > > all existing event sets with the ones described suffice. This > > > approach would not only reduce the number of entry points, but could > > > also simplify the kernel's parameter checking. 
For example at the > > > moment deleting an event set which is reference by another sets > > > set_id_next must presumably either fail, or alter those other event > > > sets to no longer reference the deleted event set. > > > > > This has been trimmed down to two calls in the new rev: > > PFM_CREATE_EVTSETS and PFM_DELETE_EVTSETS. If the event set > > already exist, PFM_CREATE_EVTSETS updates it. This is useful > > for set0 which always exists. > > > > As for delete/create, those operations can only happen when > > the context is detached. Checking of the validity of the event > > set chain is deferred until PFM_LOAD_CONTEXT because at this > > point it is not possible to modify the sets. If there set_id_next > > is invalid, PFM_LOAD_CONTEXT fails. > > But again, is there a real reason to allow incremental updates? If > there was a single operation which atomically changed all the event > sets it would mean one less entry point, *plus* we could do the error > checking there (earlier error checking is always good). And we > wouldn't even need user allocated id numbers, the array position would > suffice. > Keep in mind that PFM_CREATE_EVTSETS can be used to create multiple event sets at a time. This command can be called as many times as you want. If the set exists it is modified. You can create and delete event sets at will as long as the context is not attached. to anything. The set number determines its position in the list of sets. That list determines the DEFAULT switch order. Letting the user pick set number could be useful because it may correspond to some indexing scheme. The interface supports an override for the next set, this is the explicit next set. Why is this useful? This is interesting when the explicit link is pointing backwards. You can thus create sublists of sets. There is an example in the document.. > > > PFM_GET_CONFIG / PFM_SET_CONFIG > > > > > > Again, these definitely don't belong on the multiplexor. They don't > > > use the fd, and since they set the permission regime for the > > > mutliplexor itself, they logically belong outside. Under Linux these > > > definitely ought to be sysctls. If this is ever ported to other OSes, > > > I still don't think they belong here. Setting up the permission > > > regime for all the other calls is, I think a logically OS-specific > > > operation and doesn't belong in the core API. As far as I can tell > > > it's unlikely you would use these operations in the same programs > > > using the rest of perfmon. > > > > > As discussed earlier, on Linux, these operations could as well be > > implemented with sysctls. > > Yes, and I don't think having a cross-platform operation for this is > worthwhile. This is a system administrator operation, not an > operation for the users of perfmon, so I don't think having it > platform specific is a problem at all. > Fine with me. I'll switch to a pure sysctl approach then. > > In terms of porting, I am getting closer to being able to send you > > a skeleton header/C file with the required callbacks. Please let me > > know of any special PPC64 special behavior. For instance, looking > > at Opteron and Pentium 4: > > - on counter overflow, does PPC freeze the entire PMU > > Optional, IIRC, depending on some control bits in MMCR0. > Ok, at least there is something. > > - HW counters are not 64-bits, what are the values of > > the upper bits for counters. Should they be all 1 or all 0. > > The counter registers are 32 bits wide, but can only be effectively > used as 31-bit counters (see below). 
> > > - How is a counter overflow detected? When the full 64 bits > > of the counter overflow or when there is a carry from bit > > n to n+1 for a width on n. > > The interrupts occur on (32 bit) counter negative, rather than > overflow, per se. The only way to determine which counters have > overflowed is to look at the sign bits. Furthermore, the sign bit > must be cleared in order to clear the interrupt condition (hence only > 31-bit counters, effectively). Ok, that's fine. > > Another issue which I ran into for perfctr is that interrupts can't be > individually enabled or disabled for each counter. There is one > control bit which determines if PMC1 generates an interrupt on counter > negative, and another control bit which determines if other PMCs cause > an interrupt. I think that's fine also. For 64-bit software emulation you need to have overflow intr enabled for every counter anyway. > > Because the events for the counters are generally selected in groups, > rather than individually, you need to be able to deal with overflow > interrupts for a counter you don't otherwise care about. > > Performance monitor interrupts can also be generated from the > timebase. These occur on 0-1 transitions on bit 0, 8, 12 or 16 > (selectable) of the (64 bit) timebase. Timebase frequency is not the > same as CPU core frequency, and depends on the system, not just the > CPU (it can be externally clocked). The timebase is guaranteed to > have a fixed frequency, even on systems with variable CPU frequency, > so the ratio to CPU core frequency can also vary. > > > - Are there any PPC64 PMU registers which can only be used by > > one thread at a time (shared). Think hyperthreading. > > Not as far as I'm aware. That's good. > > > - Is there a way to stop monitoring without having to modify > > all used PMC registers. > > Yes, there is a "freeze counters" (FC) bit in MMCR0 which will stop > all the counters. > Ok. Thanks for your feedback. i am sure I'll come up with other PMU-specific questions. -- -Stephane
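To make the counter-width discussion concrete, here is a rough, hypothetical sketch in plain C. It is not the actual perfmon or kernel code; the names, the reload policy and the interrupt plumbing are all invented for illustration. It shows how an overflow handler can fold a 31-bit counter that interrupts on the "counter negative" condition into a 64-bit software count, which is what the 64-bit emulation mentioned above has to do on every overflow interrupt.

#include <stdint.h>

#define PMC_NEGATIVE 0x80000000u   /* interrupt condition: top bit of the 32-bit register set */

struct sw_counter {
    uint64_t total;    /* 64-bit software view of the event count */
    uint32_t reload;   /* value the hardware counter was last armed with */
};

/* Arm so that the next interrupt arrives after 'period' events;
   'period' must stay below 2^31, the usable counter width. */
static uint32_t pmc_arm_value(uint32_t period)
{
    return PMC_NEGATIVE - period;
}

/* Overflow-interrupt path for one counter. 'hw' is the raw register value
   read in the handler; its top bit is set, which is what raised the
   interrupt and what must be cleared by rewriting the register. */
static uint32_t pmc_on_overflow(struct sw_counter *c, uint32_t hw, uint32_t period)
{
    /* Events since the counter was armed: the register simply kept
       incrementing past the threshold, so the distance from the reload
       value is the count (including any interrupt skid). */
    c->total += hw - c->reload;

    /* Re-arm below the threshold; writing a value with the top bit clear
       also clears the interrupt condition. The caller writes this value
       back to the physical PMC. */
    c->reload = pmc_arm_value(period);
    return c->reload;
}

Whether the interrupt also freezes the whole PMU (the optional MMCR0 behaviour mentioned above) only changes how the handler re-enables counting afterwards; the bookkeeping itself stays the same.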
http://sourceforge.net/p/lse/mailman/lse-tech/?viewmonth=200503&viewday=31&style=flat
CC-MAIN-2015-32
en
refinedweb
Chatlog 2013-01-10 From Provenance WG Wiki See original RRSAgent log or preview nicely formatted version. Please justify/explain all edits to this page, in your "edit summary" text. 15:52:36 <RRSAgent> RRSAgent has joined #prov 15:52:36 <RRSAgent> logging to 15:52:38 <trackbot> RRSAgent, make logs world 15:52:38 <Zakim> Zakim has joined #prov 15:52:40 <trackbot> Zakim, this will be PROV 15:52:40 <Zakim> ok, trackbot; I see SW_(PROV)11:00AM scheduled to start in 8 minutes 15:52:41 <trackbot> Meeting: Provenance Working Group Teleconference 15:52:41 <trackbot> Date: 10 January 2013 15:52:42 <pgroth> Zakim, this will be PROV 15:52:42 <Zakim> ok, pgroth; I see SW_(PROV)11:00AM scheduled to start in 8 minutes 15:52:57 <pgroth> Agenda: 15:53:07 <pgroth> Chair: Paul Groth 15:53:22 <pgroth> rrsagent, make log publics 15:53:31 <pgroth> rrsagent, make logs public 15:53:38 <pgroth> Regrets: Curt Tilmes, Daniel Garijo, Khalid Belhajjame, Jun Zhao, Paolo Missier, zednik, hook 15:59:36 <Zakim> SW_(PROV)11:00AM has now started 15:59:45 <Zakim> + +44.238.059.aaaa 15:59:52 <Zakim> +[IPcaller] 15:59:55 <pgroth> can someone scribe? 16:01:46 <jcheney> jcheney has joined #prov 16:02:00 <pgroth> weka 16:02:19 <Zakim> + +44.131.467.aabb 16:02:21 <pgroth> mallet 16:02:47 <jcheney> zakim, aabb is me 16:02:47 <Zakim> +jcheney; got it 16:03:06 <pgroth> scribe: jcheney 16:03:08 <SamCoppens> SamCoppens has joined #prov 16:03:13 <Zakim> +??P33 16:03:18 <jcheney> topic: Admin <pgroth> Summary: Minutes of last week's telcon were accepted. Several actions were closed. 16:03:49 <TomDN> TomDN has joined #prov 16:04:43 <Zakim> + +329331aacc 16:04:48 <jcheney> pgroth: WF4Ever meeting so lots of people away 16:04:52 <TomDN> Zakim, +32 is me 16:04:52 <Zakim> +TomDN; got it 16:04:52 <pgroth> 16:05:00 <Dong> Dong has joined #prov 16:05:03 <TomDN> Zakim, SamCoppens is with TomDN 16:05:03 <Zakim> +SamCoppens; got it 16:05:05 <jcheney> pgroth: any objections to minutes? 16:05:34 <pgroth> 16:05:44 <jcheney> ... minutes from last week 16:05:59 <pgroth> accepted: January 3, 2012 minutes 16:06:10 <ivan> zakim, dial ivan-voip 16:06:10 <Zakim> ok, ivan; the call is being made 16:06:11 <Zakim> +Ivan 16:06:24 <jcheney> ... open action items: 16:06:43 <Luc> close action-154 16:06:43 <trackbot> Closed ACTION-154 Review the test cases. 16:06:47 <jcheney> ... closing some that were closed last week 16:06:47 <Luc> close action-155 16:06:47 <trackbot> Closed ACTION-155 Review the test cases. 16:07:03 <jcheney> ..stefan working on xml namespace 16:07:14 <jcheney> ... paul to send note on implementations, will do today/tomorrow 16:07:28 <pgroth> Topic: WG Implementations <pgroth> Summary: The current status of implementation reports were gone through. It was determined that the surveys are in the wrong group within WBS so that only W3C Team members (and not prov group members) can see the full results. Paul was actioned to remind Ivan to ask the W3C Systems team if they could move the surveys. Paul was actioned to go through the current results of the questionnaire and see where there are gaps. Broadly, it seems there are enough submissions in terms of usage but there are concerns about demonstrating interoperability between pairs of systems. Dong was asked to update the test case process document to refer to the WBS survey and not email. 16:07:28 <jcheney> ...stephan working on xml namespace 16:07:46 <jcheney> ... was hoping for update from stephan (who gets the emails) 16:07:59 <smiles> smiles has joined #prov 16:08:05 <jcheney> ... 
would like to see how to make a report from survey results 16:08:12 <Luc> 10 implementations, 5 vocab extensions 16:08:15 <Zakim> +??P7 16:08:44 <jcheney> ... Now have 9 impls, 5 vocabulary extensions 16:08:53 <jcheney> ... Would like to know what these are 16:09:09 <pgroth> action: send stephan an email to ask for all results of questionaires 16:09:09 <trackbot> Error finding 'send'. You can review and register nicknames at <>. 16:09:22 <pgroth> action: pgroth send stephan an email to ask for all results of questionaires 16:09:22 <trackbot> Created ACTION-158 - Send stephan an email to ask for all results of questionaires [on Paul Groth - due 2013-01-17]. 16:09:41 <pgroth> 16:10:04 <jcheney> ivan: can see all the responses 16:10:23 <jcheney> pgroth: who has done what? why can't anyone else see it? 16:10:31 <Luc> [email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected], 16:10:40 <Luc> these are the responders 16:10:45 <Luc> 16:11:01 <jcheney> ivan: <listing some of the responses> 16:11:56 <Luc> that's my 10 :-) 16:11:59 <pgroth> 16:12:30 <TomDN> Zakim, mute me 16:12:30 <Zakim> TomDN should now be muted 16:13:17 <jcheney> ivan: should be visible to members of "this group" but not sure which group it is. 16:13:30 <jcheney> ... vocabulary extensions: 5 for prov-o, none for others 16:13:43 <Luc> [email protected], [email protected], [email protected], [email protected], [email protected], 16:13:48 <Luc> 16:13:54 <jcheney> q+ 16:14:14 <jcheney> ivan: what would extension mean for prov-n? 16:14:40 <Luc> you could write an xml schema that extends prov-xml schema (but this is not recommendation track) 16:14:40 <jcheney> pgroth: no results for vocabulary usage 16:14:43 <pgroth> ack jcheney 16:14:49 <jcheney> 16:15:51 <jcheney> jcheney: the questionnaires are in the "Test Group". Can we move them to our group? 16:15:53 <pgroth> action: pgroth to send ivan an email to put the questionnaires in the right group 16:15:53 <trackbot> Created ACTION-159 - Send ivan an email to put the questionnaires in the right group [on Paul Groth - due 2013-01-17]. 16:16:07 <jcheney> ivan: this may be a mistake... will ask sysadmins if it can be fixed 16:16:14 <pgroth> q+ 16:16:17 <pgroth> q? 16:16:19 <pgroth> ack pgroth 16:16:30 <jcheney> ivan: do you want to see the feature coverage? 16:17:11 <jcheney> pgroth: would like to see feature coverage & interoperability in implementation report 16:17:21 <pgroth> 16:17:21 <pgroth> [4:06pm] 16:18:21 <jcheney> zakim, who is noisy 16:18:21 <Zakim> I don't understand 'who is noisy', jcheney 16:18:23 <jcheney> zakim, who is noisy? 16:18:33 <Zakim> jcheney, listening for 10 seconds I heard sound from the following: ??P7 (4%), ??P33 (55%), Ivan (48%) 16:18:44 <jcheney> zakim, mute ??P33 16:18:44 <Zakim> ??P33 should now be muted 16:19:16 <jcheney> @ivan: could you make a screenshot of results and people can look at it off-line? 16:19:22 <Luc> q+ 16:20:11 <jcheney> luc: we have 4 impls that write Entity and 5 that read / write Entity 16:20:11 <pgroth> yeah that's correct 16:20:56 <jcheney> ivan: then averages are meaningful: 4.56 is good 16:21:38 <jcheney> pgroth: for entity, agent we're fine 16:22:09 <jcheney> luc: 6 say no support for invalidation, 1 r/o, 2 r/2. can we assume one reads what the other has written? 
16:22:30 <jcheney> pgroth: would be good to see the actual people, so we can check this 16:23:15 <jcheney> pgroth: would like to make this public/group readable, and see where there are gaps 16:23:19 <pgroth> ack luc 16:23:20 <pgroth> q? 16:23:26 <jcheney> ivan: sounds reasonable 16:23:36 <Luc> q+ 16:24:07 <jcheney> luc: just after averages table there are details, but only for two responses - why? 16:24:19 <Dong> that's what I see as well 16:24:56 <jcheney> ivan: can see all 9 rows 16:25:04 <jcheney> ... with all responses 16:25:20 <jcheney> pgroth: we need to see what ivan sees asap 16:25:30 <pgroth> ack Luc 16:26:00 <jcheney> pgroth: wanted to ask dong what we expect back on constraints 16:26:05 <Luc> q? 16:26:22 <jcheney> ... should they email prov-public-comments or fill out a form or what? 16:26:38 <pgroth> q? 16:26:47 <pgroth> no dong 16:26:48 <Luc> dong? 16:26:56 <jcheney> zakim, unmute ??P33 16:26:56 <Zakim> ??P33 should no longer be muted 16:27:06 <jcheney> zakim, ??P33 is Dong 16:27:06 <Zakim> +Dong; got it 16:27:37 <jcheney> Dong: Decided to use questionnaire and not email, haven't removed email yet, will do soon 16:27:59 <jcheney> ... put link in call for implementations 16:28:08 <pgroth> it's not on the main page 16:28:42 <jcheney> pgroth: will update main page after changes made 16:28:45 <pgroth> i will do that 16:28:48 <jcheney> Dong: need to update front page 16:28:59 <jcheney> zakim, mute ??P33 16:28:59 <Zakim> sorry, jcheney, I do not know which phone connection belongs to ??P33 16:29:04 <jcheney> zakim, mute Dong 16:29:04 <Zakim> Dong should now be muted 16:29:13 <pgroth> Topic: Prov-Dictionary <pgroth> Summary: Tom and Sam have prov-dictionary almost ready for review by the group. They will send an email tomorrow when internal review should begin. The internal reviewers for this document are: Paolo, Stian, James, Luc Paul 16:29:19 <TomDN> Zakim, unmute me 16:29:19 <Zakim> TomDN should no longer be muted 16:29:47 <jcheney> TomDN: prov-dict pushed just before call; everything done except prov-xml 16:29:58 <jcheney> ... can be reviewed starting tomorrow 16:30:08 <jcheney> ... incorporates results of discussion on mailing list 16:30:25 <jcheney> ... can be considered for fpwd after review 16:30:28 <TomDN> 16:30:39 <jcheney> pgroth: please send email / issue tomorrow for review 16:30:46 <pgroth> q? 16:31:01 <pgroth> Topic: PROV-AQ <pgroth> Summary: Prov-aq has been made available for internal review. Reviews are due by the Jan. 17, 2013 telcon. The internal reviewers are Tim, Simon, Luc, Dong and Stian 16:31:06 <Luc> q+ 16:31:12 <TomDN> Zakim, mute me 16:31:12 <Zakim> TomDN should now be muted 16:31:20 <jcheney> Luc: can we confirm reviewers for prov-dictionary? 16:31:55 <jcheney> pgroth: paolo, stian, james(?), luc, pgroth 16:31:57 <Luc> Paolo, Stian, James (maybe), Luc, and Paul 16:32:08 <pgroth> ack Luc 16:32:08 <Luc> ack luc 16:32:19 <pgroth> 16:32:49 <jcheney> pgroth: reviewable version is available, questions for review in issue 613 16:32:58 <jcheney> ... would like feedback on pingback 16:33:10 <pgroth> Tim, Simon, Luc, Dong and Stian 16:33:14 <pgroth> q? 16:33:14 <jcheney> ... "last call" before prov-aq released as ready for implementation 16:33:52 <pgroth> q? 16:33:59 <jcheney> ... deadline for review is thursday next week 16:34:19 <pgroth> Topic: Prov-o encoding constraints <pgroth> Summary: The group went over ISSUE-612 on encoding constraints in OWL. Two issues were identified: 1) that Kerry though that wasDerivedFrom was transitive, which it is not. 
2) Whether prov-o should include encodings of constraints. For 1) Luc agreed to formulate a response to kerry. For 2) the group agreed that that encoding owl constraints was not part of prov-o and that it was an implementation. It was also agreed that this should be signposted in the various documents. Paul agreed to formulate a response. 16:34:26 <pgroth> 16:34:46 <jcheney> pgroth: comments from two implementors working with prov-o and constraints 16:35:08 <jcheney> ... looking for constraints implementable in OWL to be part of prov-o 16:35:40 <jcheney> ... this was discussed and resolved not to do this earlier, but this could be an implementation technique 16:35:46 <Luc> q+ 16:35:50 <jcheney> ... how to address? 16:36:22 <jcheney> Luc: no consensus for derivation to be transitive; we voted against this and it is not a constraint in the document. 16:37:01 <pgroth> ack luc 16:37:15 <jcheney> luc: should review & approve responses, but would be good to tell them this specific point 16:37:34 <ivan> q+ 16:37:37 <Luc> action: Luc to write a response to Kerry why derivation is not transitive 16:37:37 <trackbot> Created ACTION-160 - Write a response to Kerry why derivation is not transitive [on Luc Moreau - due 2013-01-17]. 16:37:41 <pgroth> ack Ivan 16:38:01 <jcheney> ivan: what is wrong with putting expressible constraints in separate document? 16:38:33 <jcheney> ... don't see a case for editing prov-o core document 16:39:06 <jcheney> pgroth: fine if people (in or out of wg) want to encode constraints, but not necessarily part of wg delierables 16:39:14 <jcheney> s/delierables/deliverables/ 16:39:27 <jcheney> ivan: if wg members do this, we can at least publish it somewhere 16:39:28 <pgroth> q? 16:39:42 <Luc> q+ 16:39:47 <pgroth> ack luc 16:40:07 <jcheney> Luc: need to respond to reviewers, along lines Paul gave 16:40:16 <jcheney> ... open questions whether some/all constraints implementable and how 16:40:52 <jcheney> ... wg decided to view this as an implementation issue, we can offer to gather experiences with this/axioms suggested by implementors 16:41:27 <jcheney> pgroth: sounds fine, but it seems to come up - should we say this in prov-o or constraints saying this? 16:42:11 <pgroth> q? 16:42:17 <jcheney> Luc: seems reasonable. not sure where. james? 16:43:58 <pgroth> q? 16:44:16 <jcheney> jcheney: could put disclaimer/explanation in constraints, maybe signpost elsewhere 16:44:39 <jcheney> pgroth: could say something in overview, prov-o also 16:45:10 <pgroth> action: pgroth to draft response on owl implementation of prov-constraints 16:45:11 <trackbot> Created ACTION-161 - Draft response on owl implementation of prov-constraints [on Paul Groth - due 2013-01-17]. 16:45:28 <pgroth> topic: Test cases response <pgroth> Summary: The group discussed issue 611 an in particular the issue with test cases. The group noted that the test cases should not be normative as these may change and be updated. In addition the group noted that if the test cases and the spec disagreed it would be hard to determine which was the tie breaker. Thus, there was consensus that the test cases were non-normative. Luc noted that the test cases can also be used as good examples of provenance and thus function as test cases for the two normative serialisations (prov-o, prov-n) . The group agreed to try to draft responses to all comments by Monday. 
Each part of issue 611 was divided up and assigned to a group member as documented on 16:45:36 <pgroth> 16:46:34 <jcheney> pgroth: james responded to questions about constraints; this seems fine as response to that part if wg can endorse 16:46:44 <jcheney> ... also asked about prov-constraints test cases 16:46:54 <jcheney> ... should they be part of spec? 16:47:07 <pgroth> We 16:47:07 <pgroth> would like to see test suites for the other operational parts of PROV, 16:47:07 <pgroth> in particular for testing inferences separate from validation. 16:47:26 <jcheney> ... in particular, in a normative place. 16:47:39 <jcheney> ... and would like further test cases for other documents 16:47:43 <pgroth> q? 16:47:45 <jcheney> ... how should we respond 16:47:54 <jcheney> ivan: why do they want it in normative spec? 16:48:11 <jcheney> pgroth: if normative, then better interoperability (they say) 16:48:42 <jcheney> ivan: that's a matter of opinion. otoh, if list of test cases become normative, cannot extend them later, or would have normative & non-normative tests 16:49:00 <jcheney> ... if test suite is non-normative, then we have capability to add new tests even when docs published 16:49:14 <jcheney> ... had this in rdfa wg 16:49:23 <Dong> q+ 16:49:46 <jcheney> ... discrepancies between implementations arose, which were addressed through additional tests 16:49:52 <Dong> Zakim, unmute me 16:49:52 <Zakim> Dong should no longer be muted 16:49:52 <pgroth> q? 16:50:26 <jcheney> Dong: if we move test cases into normative, are we saying that an impl that passes all test cases are compliant? We would have two definitions 16:50:34 <jcheney> ... test cases and original spec 16:50:44 <jcheney> ... cannot be sure that test cases cover all constraints. 16:50:59 <jcheney> ... would provide false sense of ciompliance 16:51:00 <pgroth> q? 16:51:03 <pgroth> ack Dong 16:51:06 <Dong> Zakim, mute me 16:51:06 <Zakim> Dong should now be muted 16:51:06 <Luc> q+ 16:51:08 <jcheney> s/ciompliance/compliance 16:51:33 <jcheney> Luc: other issue is that we don't have formal mappings / equivalence between the serializations 16:51:39 <pgroth> ack Luc 16:51:50 <jcheney> ... it could be that some test cases would work in prov-n and not rdf or vice versa. 16:52:07 <jcheney> ... not in favor of normative test cases 16:52:40 <jcheney> ... do we need other test cases for other specs? 16:52:49 <jcheney> (previous line is pgroth) 16:52:57 <jcheney> pgroth: do we need other test cases for other specs? 16:52:59 <pgroth> q? 16:53:04 <Luc> q+ 16:53:23 <pgroth> ack Luc 16:54:08 <jcheney> Luc: the test suite contains typical examples expressed in prov-n, prov-o. what else could we do beyond having weird examples to exercise syntax? 16:54:17 <jcheney> ... more interesting to have useful provenance examples 16:54:49 <jcheney> pgroth: test cases provide example repository, can be used to test compliance with other specs 16:54:55 <pgroth> q? 
16:55:03 <Luc> this was a suggestion from the SW coordination group that we have a set of useful provenance examples 16:55:22 <Luc> q+ 16:55:28 <jcheney> pgroth: we seem to have outline of response 16:55:49 <jcheney> Luc: important to try to provide responses promptly because they are trying to implement and may be waiting before submitting reports 16:56:10 <Luc> i have updated page 16:56:13 <Luc> q+ 16:56:36 <pgroth> ack Luc 16:56:43 <jcheney> pgroth: Luc will do response to Kerry, Paul will respond to general question need one for test cases and their comments on constraints 16:57:19 <jcheney> Luc: created page for responses 16:58:04 <jcheney> ... suggest we assign people to address these 16:58:53 <jcheney> pgroth: prov-o (611) essentially same as 612 about encoding constraints in owl, paul will do these 16:59:22 <jcheney> ... jcheney will do 611 (constraints) 17:00:06 <Dong> ok 17:00:09 <jcheney> ... 611 (normative test cases) - Dong 17:00:32 <jcheney> ... can we do this by monday? 17:01:20 <jcheney> Luc: who will do test cases for other specifications? 17:01:24 <jcheney> pgroth: will do that 17:01:39 <jcheney> pgroth: goal do send for approval by wg on monday 17:01:46 <jcheney> pgroth: goal to send for approval by wg on monday 17:01:52 <jcheney> ... so we can send back on tuesday 17:01:54 <pgroth> q? 17:02:42 <jcheney> pgroth: seems uncontroversial so hopefully we can approve over mailing list 17:02:47 <jcheney> ... or at least try 17:02:59 <Dong> Monday is fine for my part 17:03:16 <pgroth> q? 17:03:30 <SamCoppens> Bye 17:03:34 <Zakim> -Ivan 17:03:34 <Zakim> - +44.238.059.aaaa 17:03:34 <Zakim> -??P7 17:03:36 <Zakim> -TomDN 17:03:43 <SamCoppens> SamCoppens has left #prov 17:03:46 <Dong> bye everyone 17:03:47 <Zakim> -jcheney 17:03:49 <Zakim> -[IPcaller] 17:03:55 <pgroth> rrsagent, set log public 17:03:59 <pgroth> rrsagent, draft minutes 17:03:59 <RRSAgent> I have made the request to generate pgroth 17:04:03 <pgroth> trackbot, end telcon 17:04:03 <trackbot> Zakim, list attendees 17:04:03 <Zakim> As of this point the attendees have been +44.238.059.aaaa, [IPcaller], +44.131.467.aabb, jcheney, +329331aacc, TomDN, SamCoppens, Ivan, Dong 17:04:11 <trackbot> RRSAgent, please draft minutes 17:04:11 <RRSAgent> I have made the request to generate trackbot 17:04:12 <trackbot> RRSAgent, bye 17:04:12 <RRSAgent> I see 5 open action items saved in : 17:04:12 <RRSAgent> ACTION: send stephan an email to ask for all results of questionaires [1] 17:04:12 <RRSAgent> recorded in 17:04:12 <RRSAgent> ACTION: pgroth send stephan an email to ask for all results of questionaires [2] 17:04:12 <RRSAgent> recorded in 17:04:12 <RRSAgent> ACTION: pgroth to send ivan an email to put the questionnaires in the right group [3] 17:04:12 <RRSAgent> recorded in 17:04:12 <RRSAgent> ACTION: Luc to write a response to Kerry why derivation is not transitive [4] 17:04:12 <RRSAgent> recorded in 17:04:12 <RRSAgent> ACTION: pgroth to draft response on owl implementation of prov-constraints [5] 17:04:12 <RRSAgent> recorded in 17:04:13 <Zakim> -Dong 17:04:14 <Zakim> SW_(PROV)11:00AM has ended 17:04:14 <Zakim> Attendees were +44.238.059.aaaa, [IPcaller], +44.131.467.aabb, jcheney, +329331aacc, TomDN, SamCoppens, Ivan, Dong # SPECIAL MARKER FOR CHATSYNC. DO NOT EDIT THIS LINE OR BELOW. SRCLINESUSED=00000306
http://www.w3.org/2011/prov/wiki/Chatlog_2013-01-10
CC-MAIN-2015-32
en
refinedweb
System.Runtime Namespaces .NET Framework 4 The System.Runtime namespaces contain types.
https://msdn.microsoft.com/en-us/library/gg145017(v=vs.100).aspx
CC-MAIN-2015-32
en
refinedweb
Manages joined fields for a vector layer. More... #include <qgsvectorlayerjoinbuffer.h> Manages joined fields for a vector layer. Definition at line 32 of file qgsvectorlayerjoinbuffer.h. Definition at line 25 of file qgsvectorlayerjoinbuffer.cpp. Definition at line 30 of file qgsvectorlayerjoinbuffer.cpp. Joins another vector layer to this layer. Definition at line 63 of file qgsvectorlayerjoinbuffer.cpp. Create a copy of the join buffer. Definition at line 384 of file qgsvectorlayerjoinbuffer.cpp. Quick way to test if there is any join at all. Definition at line 62 of file qgsvectorlayerjoinbuffer.h. Calls cacheJoinLayer() for all vector joins. Definition at line 251 of file qgsvectorlayerjoinbuffer.cpp. Emitted whenever the list of joined fields changes (e.g. added join or joined layer's fields change) Find out what is the first index of the join within fields. Returns -1 if join is not present Definition at line 349 of file qgsvectorlayerjoinbuffer.cpp. Finds the vector join for a layer field index. Definition at line 369 of file qgsvectorlayerjoinbuffer.cpp. Return a vector of indices for use in join based on field names from the layer. Definition at line 175 of file qgsvectorlayerjoinbuffer.cpp. Reads joins from project file. Definition at line 310 of file qgsvectorlayerjoinbuffer.cpp. Removes a vector layer join. Definition at line 95 of file qgsvectorlayerjoinbuffer.cpp. Updates field map with joined attributes. Definition at line 196 of file qgsvectorlayerjoinbuffer.cpp. Definition at line 64 of file qgsvectorlayerjoinbuffer.h. Saves mVectorJoins to xml under the layer node. Definition at line 265 of file qgsvectorlayerjoinbuffer.cpp.
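The methods above are the C++ side of the join buffer; in practice most users drive them through a vector layer. Purely as an illustrative sketch against the QGIS 2.x Python API (the layer paths and field names below are hypothetical, not taken from the documentation above), a join that ends up managed by QgsVectorLayerJoinBuffer can be set up like this:

from qgis.core import QgsVectorLayer, QgsVectorJoinInfo

# Hypothetical layers: a target layer and a lookup table sharing a key field.
target_layer = QgsVectorLayer("/tmp/parcels.shp", "parcels", "ogr")
lookup_layer = QgsVectorLayer("/tmp/owners.csv", "owners", "ogr")

join = QgsVectorJoinInfo()
join.joinLayerId = lookup_layer.id()   # layer providing the extra attributes
join.joinFieldName = "parcel_id"       # key field in the joined layer
join.targetFieldName = "parcel_id"     # key field in the target layer
join.memoryCache = True                # let the buffer cache joined attributes
target_layer.addJoin(join)             # delegates to the layer's join buffer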
http://qgis.org/api/classQgsVectorLayerJoinBuffer.html
CC-MAIN-2015-32
en
refinedweb
Name: krC82822 Date: 02/17/2001 17 Feb 2001, eval1127@eng -- see also # 4187333. java version "1.3.0" Java(TM) 2 Runtime Environment, Standard Edition (build 1.3.0) Java HotSpot(TM) Client VM (build 1.3.0, mixed mode) The Java runtime right now is pretty inefficient when it comes to having multiple runtimes. You can take the smallest class, run it, and it will take up X megs of RAM. The next JVM running it will take up another X megs, and a third JVM will take up another X megs. [trivial little program follows below.] I'm happy to see that 'X' has decreased, from 12 megs for JDK1.2, to about 6 megs for JDK1.3. But in principle, I would think it could be better still. If you want Java programs to be used as commonly as C programs, you need to make the runtime similarly efficient about sharing common objects. One of the reasons C is so memory-efficient is that it has a shared "libc". The Java runtime seems to lack that. I don't think the common system classes are being shared at all. I guess they're all being loaded in from "rt.jar" separately. What the runtime needs is a pre-expanded set of instances of the standard classes that can be shared in a .so with additional runtimes. [Or something similar anyway. Hopefully, you guys can think of something that would deliver a similar level of memory sharing.] Here's a trivial class to demonstrate the problem: public class test { public static void main(String args[]){ System.out.println("Sleeping..."); try { Thread.sleep(10000); } catch(Exception err){ } System.out.println("Done"); } } Each invocation in parallel takes the same amount of additional memory as the initial JVM invocation does: 6 megs each time, for a 542-byte class that doesn't even attempt to open a graphics window. This kind of memory footprint makes it impractical to write small Java utilities. If everyone on a system were to use them, they would quickly swallow available RAM. (Review ID: 117186) ====================================================================== CONVERTED DATA BugTraq+ Release Management Values COMMIT TO FIX: tiger-beta FIXED IN: tiger-beta INTEGRATED IN: tiger-beta WORK AROUND Name: krC82822 Date: 02/17/2001 none ====================================================================== EVALUATION ###@###.### 2001-10-12 In progress. ------------------------------------------------------------------ Class data sharing (CDS) has been introduced in the 1.5 release as the first step toward addressing this problem. The CDS code was originally developed by Apple Computer, Inc. during their port of the Java HotSpot VM to Mac OS X and was further developed within Sun primarily by ###@###.###. CDS is supported on platforms where the Java HotSpot Client VM is the default, except for Windows 95/98/ME. When the JRE is installed on supported platforms using the Sun provided installer, the installer loads a set of classes from the system jar file into a private internal representation, and dumps that representation to a file, called a "shared archive". If the Sun JRE installer is not being used, the shared archive can be created manually, as is explained in the Java HotSpot VM documentation (linked to from the 1.5.0 release notes). During subsequent JVM invocations, the shared archive is memory-mapped in, saving the cost of loading those classes and allowing much of the JVM's metadata for these classes to be shared among multiple JVM processes.
The primary motivation for including CDS in the 1.5 release is the decrease in startup time it provides. CDS produces better results for smaller applications because it eliminates a fixed cost; in addition, less data is loaded out of the shared archive, because the metadata for unused methods remains completely untouched as opposed to being created and processed during class loading. These savings allow more applications to be run concurrently on the same machine. However, on some operating systems (in particular, Windows), the perceived footprint of the process may increase, because a larger number of pages are being mapped into the process's address space. Reducing footprint, including perceived footprint, remains a high priority and work is ongoing in this area. The class data sharing work is a step toward reducing the startup time and footprint of Java applications without sacrificing stability or compatibility. In the future we plan to share and cache other data in the archive and extend its functionality to application-level classes to more significantly improve startup performance for larger applications. ###@###.### 2004-01-23
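As a usage note (hedged: the class name comes from the bug report above, and the -Xshare switches are the documented J2SE 5.0 HotSpot options for class data sharing; exact behaviour depends on the JVM build), the shared archive can be regenerated and exercised from the command line roughly like this:

java -Xshare:dump        # regenerate the shared archive (needs write access to the JRE directory)
java -Xshare:on test     # force use of the archive when running the reporter's test class
java -Xshare:off test    # run without sharing, for comparing startup time and footprint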
http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4416624
CC-MAIN-2015-32
en
refinedweb
sethostname() Set the name of the current host Synopsis: #include <unistd.h> int sethostname( const char * name, size_t namelen ); Since: BlackBerry 10.0.0 Arguments: - name - The name that you want to use for the host machine. Hostnames are limited to MAXHOSTNAMELEN characters (defined in <sys/param.h>). - namelen - The length of the name. Library: libc Use the -l c option to qcc to link against this library. This library is usually included automatically. Description: The sethostname() function sets the name of the host machine to be name. Only the superuser can call this function; this is normally done only at boot time. This function sets the value of the _CS_HOSTNAME configuration string, not that of the HOSTNAME environment variable. Errors: - EFAULT - Either name or namelen gave an invalid address. - EPERM - Although the caller wasn't the superuser, it tried to set the hostname. Caveats: This function is restricted to the superuser, and is normally used only at boot time. Last modified: 2014-06-24
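The reference above documents the native C entry point. Purely as an illustration of the call shape (a name buffer plus an explicit length), here is a hedged Python sketch that drives the same libc function through ctypes; it assumes a POSIX-style libc that exports sethostname and a process with the required superuser privileges, and is not BlackBerry-specific sample code:

import ctypes
import ctypes.util
import os

libc = ctypes.CDLL(ctypes.util.find_library("c") or None, use_errno=True)
name = b"newhostname"
# Mirrors the C synopsis: sethostname(name, namelen)
if libc.sethostname(name, len(name)) != 0:
    err = ctypes.get_errno()
    raise OSError(err, os.strerror(err))  # e.g. EPERM when not running as the superuser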
http://developer.blackberry.com/native/reference/core/com.qnx.doc.neutrino.lib_ref/topic/s/sethostname.html
CC-MAIN-2015-32
en
refinedweb
Another CR for Hibernate Search 4.1 is ready! Even though we are in the candidate release phase, we introduced something more than the usual minor bugfixes, as the following improvements are too nice to delay and technically not very risky. Depending on Hibernate 4.1 Hibernate Search was updated to work with Hibernate ORM 4.1, as it was still using 4.0. Rethinking the JGroups integration The JGroups Channel is the communication transport used when connecting multiple nodes in a cluster using JGroups; before 4.1.0.CR3, Hibernate Search would expect you to configure a Channel for each clustered index, but having to configure multiple JGroups Channels is tedious: for example, each channel should use a different set of network ports. The Channel is now a service shared across all indexes: every index configured to use JGroups will share the same Channel instance. This simplifies configuration, network administration and speeds up initialization. Configuration details are described in the JGroups configuration paragraph. If you were using JGroups before, please see the Migration Guide. JGroups channel injection It is now possible to have Hibernate Search use an existing JGroups Channel, injecting the instance in the configuration. This was primarily introduced for other frameworks integrating our search engine, such as CapeDwarf, so they can control the Channel lifecycle and make use of alternative initialization options. Remember however: Search installs its own message Receiver; it's not going to share the channel with other services! import org.hibernate.search.backend.impl.jgroups.JGroupsChannelProvider; org.jgroups.JChannel channel = ... // initialize or look up the channel directly Map<String,Object> properties = new HashMap<String,Object>(); properties.put( JGroupsChannelProvider.CHANNEL_INJECT, channel ); // ... other options ... EntityManagerFactory emf = Persistence.createEntityManagerFactory( "userPU", properties ); Plans for next... We're working as well on making the master/slave an automatic election process, but that's too big of a change for a CR, so consider it just a teaser for the upcoming 4.2! Of course, you can help starting to test it today if you're willing to participate in the coding and try the bleeding edge. The usual links - Download it from Sourceforge or via Maven artifacts - Get in touch on the forums or on the mailing list - Help out: have a look at JIRA and get the code from GitHub
http://in.relation.to/2012/03/29/hibernate-search-41-cr-3/
CC-MAIN-2015-32
en
refinedweb
This article presents a very small-scale blog engine developed using ASP.NET MVC, Entity Framework and SQL Server. The basic idea behind the creation of this project is to understand the ASP.NET MVC architecture by following the approach of learning by implementing a rudimentary blog engine. It was quite recently that I started working with MVC 3.0. Before that I was doing Web Forms development. In the last few months of working with MVC I started liking MVC a lot. I talk a lot about MVC at a local user group. Some young developers of this local user group asked me to explain the MVC architecture using a hands-on approach. I took the challenge and this application is the result of that. Note: I did a similar project to teach n-tier architecture last year. It can be found here: YaBlogEngine - A Tiny Blog Engine written in ASP.NET/C#[^] I developed this application for them and developed it piece by piece with an explanation of each topic. Here in this article, I am simply giving the code for this sample application and will talk about various design decisions I took and why. For instance, this application is developed in MVC 2.0 with ASPX views and not the Razor view engine. The reason for this was to keep the ASPX page code along the same lines as that in Web Forms. If I had started talking about Razor in the first place, that would have been a little digressing. Now I cannot put all the MVC theory needed for this project in this single article. So I will just show how this code can be used and will perhaps take separate articles to discuss independent topics of MVC. Now, we could have done the data access using classic ADO.NET easily, but for this project Entity Framework is used for data access. The major reason for this is that it generates all the boilerplate functionality for the data access and at the same time it generates the entities for the database objects. These entities can readily be used as Model classes in our application, and thus we need not write our domain model classes separately. The generated entities for the database look like this: [figure: Entity Framework generated entity classes] Now the benefit of using Entity Framework is that we get all the entities ready to be used as models. But using the Entity Framework-generated Context directly would lead to data-access code scattered across the application. So to prevent this we need to create a single point from where all the database operations will be done. This class will be called the repository class. We need to create multiple repository classes, each mapping to an entity of the data model. Now how will these multiple repositories use the ObjectContext? For this we put these repositories behind the UnitofWork object, whose responsibility is to hand over the respective repository to the controller classes. This complete structure looks like this: [figure: repositories and UnitofWork behind the controllers] This explanation of the Repository pattern is too abstract for beginners, and we cannot cover the repository and unit of work patterns in one paragraph. But the main intention was that those who know this pattern will recognize that it is being used. If you don't know the pattern, simply look at the respective classes and the functionality will be very clear. Note: Now a question might arise: why not use generic repositories here? Well, this was simply a design decision because the target audience of this exercise was not comfortable with generic repositories; they needed all the predicates to be inside the repository classes and have specific repositories. A minimal sketch of the pattern follows below.
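The project's actual implementation is C# on top of the Entity Framework ObjectContext; the sketch below is only meant to make the shape of the pattern concrete in a compact, language-neutral way. It is written in Python, and every class and member name in it is hypothetical rather than taken from the attached project: one shared context, one repository per entity, and a unit-of-work object that hands repositories to the controllers and commits once.

class BlogContext:
    """Stand-in for the EF ObjectContext: tracks pending objects and saves them."""
    def __init__(self):
        self.pending = []
        self.saved = []

    def save_changes(self):
        self.saved.extend(self.pending)
        self.pending = []

class BlogRepository:
    """One repository per entity; all predicates/queries for blogs live here."""
    def __init__(self, context):
        self._context = context

    def add(self, blog):
        self._context.pending.append(blog)

    def all(self):
        return list(self._context.saved)

class UnitOfWork:
    """Single data-access entry point: one shared context, repositories handed out."""
    def __init__(self):
        self._context = BlogContext()
        self.blogs = BlogRepository(self._context)

    def commit(self):
        self._context.save_changes()

# Usage from a hypothetical controller:
uow = UnitOfWork()
uow.blogs.add({"subject": "Hello", "body": "First post"})
uow.commit()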
We have an account controller already created when we created the project. We will use the default membership for this project. We will also create two more controllers: one for blogs and comments, and another for managing categories. Also, we need some data in our master page, and thus we need to have all the controllers create this data while they are being instantiated. To make this easy, we created an abstract AController class. This class only does the job of preparing the data needed when the controllers are instantiated, i.e. the data that is being used in the master page. We will then derive all the controllers from this abstract class. We are using unobtrusive validation for this project, i.e. we put data annotations on the entity classes and they take care of the validations. For example, to make all the fields of a blog entry required, we did the following: [MetadataType(typeof(BlogMetaData))] public partial class Blog { } // These will facilitate unobtrusive validations public class BlogMetaData { [Required(ErrorMessage="Blog content is required before posting a blog")] public string Body { get; set; } [Required(ErrorMessage = "Subject is required before posting a blog")] public string Subject { get; set; } } This whole project has been created as a teaching exercise. It has a lot of scope for improvement. But since this was created for teaching how to create a first MVC application, the following things were not done and should be done to make this project even better, for example regex-based input validation. This article is not meant to be a tutorial on MVC or anything else. Rather, it just explains a little bit about the attached sample application and how to use/refer to its code. This sample application has been created during a 4-hour training session I conducted to teach MVC to Web Forms developers. We cannot cover all the theoretical aspects associated with this project in this single article. So I am posting the "using the code" guide along with the sample code, hoping that someone might find it useful. This is in no way a value add for experienced programmers, but beginners might want to play around with the code. I hope this sample application will be somewhat useful for someone.
http://www.codeproject.com/Articles/575189/YaBlogEngineMVC-A-Tiny-Blog-Engine-written-in-ASP?fid=1830186&df=90&mpp=10&noise=1&prof=True&sort=Position&view=Quick&spc=Relaxed
CC-MAIN-2015-32
en
refinedweb
wait and notify methods of the Object class. In "Using threads in Java, Part II" I took the synchronized threads and created an in-memory communication channel so that two or more threads could exchange object references. Then in "Using communication channels in applets, Part 3" I applied these communication channels to co-operating applets that were laid out in an HTML page by a Java-aware browser. These applets combined to create an active HTML page that implemented a simple temperature conversion applet. In this last column on threads and interapplet communication I'll look at a couple of issues associated with this approach, and in particular I'll discuss how a layer can be created on top of the existing DataChannel design to allow multiple DataChannels to feed into a single DataChannel. The source code is also online and available for your use, in either an elaborate form, or as a tar or zip archive. Next month I'll pick on another oft-misunderstood aspect of the Java system, class loaders. We implement this easily with a DataChannel. The skeleton of the OptionButton control is as follows: public class OptionButton extends Applet implements Runnable { public void init() { new DataChannel(getParameter("datachannel")); state = false; } public void paint(Graphics g) { ... display my label and my choice ring ... } public void start() { ... create the data channel ... } public void stop() { ... release the data channel ... } public void run() { while (thread == Thread.currentThread()) { value = getValue(); current_state = value == myValue; repaint(); } } public boolean mouseUp(...) { sendValue(myID); } } As you can read in the code, the applet is quite simple. The magic is taken care of by the DataChannel. The basic applet knows only two things -- how to render itself in the checked and unchecked state -- and when it gets clicked it sends its value out to its DataChannel, which is the same channel that it is monitoring. When an option button receives a value from its DataChannel, if the value is equivalent to its own value, it sets its checked state to true and repaints itself. Can you see how it would be modified to be a non-exclusive choice control? Certainly the boundary condition of an OptionButton with no one else in the group would get halfway there. But recall that the way the button gets unset is that some other choice is set. Obviously the mouseUp method would have to implement a toggle rather than a single set semantic. I'll leave it as an exercise for you to create this class (although there is a version in the sources), and when you write it, change the box shape from round to square. This will give users the idea that this is not a single-choice option.
http://www.javaworld.com/javaworld/jw-07-1996/jw-07-mcmanis.html
crawl-002
en
refinedweb
Plone Upgrade Guide Instructions and tips for upgrading to a newer Plone version. 1. Introduction What does it mean to upgrade Plone? 2.5.x to 3.3. In particular, read everything in the "common problems and issues" section. The guide applies to all contemporary versions of Plone, and we have also included the older, unsupported versions for reference. A note about version numbering and terminology Up until Plone 2.1, the policy was that each of our major releases would be incremented 0.1, like a standard framework policy. This caused some confusion and false expectations on how complex an upgrade would be, and have since changed this policy. Starting after the 2.5 release, we have moved to a policy that increases the version number to a .0 on every major release. This means that when we say a "major release", we are referring to a x.0 release, whereas a minor release has the version numbering 2.5.x or 3.0.x. In addition to the general procedure there are version-specific migration guides. These guides contain more specific instructions and valuable information that has been collected from real-life migration cases. 2. Preparations Things to do before you migrate Plone. Gather information - Read the "What's new in..." for your relevant plone version, and read the release notes. You'll find these in the CMFPlone directory of the distribution of the new version of Plone. - it's) Back up your Plone site It's very important to back up your Plone site. You will find an excellent how-to on backing up your Plone site here. Setup a test environment to rehearse the upgrade! 3. Upgrading Plone A quick overview of how to upgrade your Plone site. This is how you do most of the migrations, and is generally all that is requred for upgrades between minor versions of Plone. When upgrading to a newer release of Plone, it is important to run the content migration procedure, since internal structures in Plone might have changed since the last version. This is the general procedure for upgrading. Before you start upgrading anything, make sure you have a backup. The basic manual procedure is detailed below. If you are using the installers, you can skip the part about moving away directories and replacing them with the new ones (step 3-4) - it should be handled by the installer for you. - Back up your entire Plone directory - Shut down your Plone server instance - Remove the Product directories you want to replace (ie. the ones in the package you downloaded) - Put in the new Product directories - Start Plone again - your site may be inaccessible until we have performed the next steps - don't panic :) - Go to (aka. the ZMI) and click portal_migrations - Make sure you are on the Upgradetab (in older versions, this tab is called Migrate) — it will state something like: Instance version: 2.5.3 File system version: 3.1.1 - This means that you have to run the upgrade procedure to be updated to 3.1.1. - Click the Upgradebutton. If you want to see what steps the upgrade would go through without making the actual changes, you can check the Dry Runoption - this will do the exact same steps as a normal upgrade/migration will do, but not write anything to the database. - The site will now be updated, this may take a while, depending on which versions you upgrade from/to. For example, the upgrade from Plone 2.0 to Plone 2.1 involves conversion and re-cataloging of all content in your site, so if you have a big site, this may take a while. Be patient. 
For those of you who wonder why we don't do this automatically, the reason is that we don't want to modify your data, and you should have the opportunity to back up the data before doing the upgrade. For advanced/enterprise users: It is normally possible to upgrade in-place (at least between minor versions) without any site downtime if you run ZEO and multiple load-balanced instances. See the ZEO documentation for more information if you need this. 4. Upgrade add-on products The steps to take to migrate your third party products - Shut down your Plone server instance - Navigate to your Plone instance product directory - Remove the directories of the products you want/ need to replace - Copy the new product directories across, and check that the permissions on each product directory are correct - Start Plone again - your site may be inaccessible until we have performed the next step - don't panic :) - Navigate to the quickinstaller in the ZMI, and reinstall or upgrade products if you can (products that support both your current and new version of Plone). Perform product-specific upgrade procedures (if any). You will find these in the docs of each product 5. Troubleshooting What to do when a problem occurs during a Plone migration. When a problem occurs during the migration we recommend that you take the following steps. Check the log files When a site error occurs, or Zope fails to start, there's probably an informative error message in Zope's log files. Locate these log files and inspect event.log. Ignore irrelevant warnings and search for words such as error, exception and traceback (case-insensitive). When Zope doesn't start and there's no useful information in the log file, you can start Zope interactively and watch for error messages in the output: zopectl fg You may be able to find more information on the error messages in: - the Version-specific migration tips for your version of Plone - the Error References - the pastebin - where error messages and code fragments are shared and debugged collectively Test without customizationscheck your customizations. It's usually best to copy the original files of the new version of Plone to your skin, and re-customize those. Test without productspenetrable to others. Ask for help on a mailing list Ask for help on the Plone setup list. Be sure to: - Provide relevant source code for your customizations that are part of the problem. - Describe the exact configuration, software versions, migration history, error messages and so on. Report a bug Once you have investigated, analyzed, identified and confirmed the cause of your problem and you are convinced it's a bug (rather than an X-file), go to the appropriate bug tracker and report it: - Products: the README usually tells how to report bugs * Plone Issue Tracker * CMF Issue Collector (if you don't know what the CMF is, don't report bugs) * Zope Issue Collector Do not use the bug trackers to ask for help. First analyze your problem and assert that it's a bug before you report it. 6. Version-specific migration procedures and tips In addition to the general procedure described in the previous sections, this section provides version-specific procedures and tips. If your migration does not involve a version pair specified here, then you may follow the general procedures alone. 6.1. General advice for upgrading pre-2.5 releases to the latest release To upgrade a very old version of Plone (2.1, 2.0 or 1.0), we recommend that you upgrade to Plone 2.5.x first, and *then* upgrade to later releases. 
The reason for this is that Plone left its old migration code behind with Plone 3.0 and later releases to make it easier for the team to keep the upgrades and code maintenance simpler. So, Plone 2.5.x has the latest and most complete version of the upgrade code that supports Plone 1.0, Plone 2.1 and Plone 2.5 — and Plone 3.0 and later releases only have the new-style upgrade code that support upgrading from 3.0 onwards. As an example, let's say you're running an ancient Plone 1.0.5 install. The approach to upgrade to (for example) Plone 3.1.6 would then be: - Back up your setup. - Move your Data.fs (and upgraded add-on products) to a Plone 2.5.x install. - Follow the general upgrade instructions outlined earlier in this manual - Once you have a running Plone 2.5.x-based version of the install, get the latest 3.x release, and upgrade from Plone 2.5.x to Plone 3.1.6 6.2. Upgrading from Plone 1.0 to 2.0 Version-specific procedures and tips for migrating Plone 1.0 to.icons. Special Note about the Windows Installer You have to uninstall previous Plone versions and delete the Plone service before you can install Plone 2 successfully on Windows XP. The service doesn't delete by itself when you uninstall. 6.directory! 6.4. Upgrading from Plone 2.1 to 2.5 Version-specific procedures and tips for the migration of Plone 2.1 to 2.5. There are no version-specific procedures or tips for the migration of Plone 2.1 to 2.5 at this time. We expect the general procedure outlined in this manual to be sufficient. 6.5. Upgrading Plone 2.5 to 3.0 Upgrading your site and your products from Plone 2.5 to Plone 3.0. 6.5.1. Upgrading a Plone 2.5 site to 3.0 Tips and issues when upgrading your site from Plone 2.5 to 3.0 - To migrate from Plone 2.5 to 3.0, please follow the steps outlined in the General approach to upgrading. - One thing to make sure you have right is that Plone is now not only files in the Products directory, but also modules inside lib/pythonin your instance. If you're using the installers, this is taken care of for you, but if you're doing it manually, make sure the lib/pythoncomponents are in the right location. Third party products If you have installed and depend on a lot of third-party products produced by developers outside the Plone Team, it's hard to say something definite - make sure the products you depend on are certified to work with Plone 3. GroupUserFolder is NOT supported! (NOTE: It may not be possible to upgrade a site using GRUF with external user folders such as LDAPUserFolder. In those cases it is advised to create a new site and move the content over manually.) If you have a big site running Plone and want a painless transition to the much-improved version 3, we suggest that you hire a company that can do the migration properly for you. Send a mail to the Plone Developer mailing list, and we can recommend a company in your area if needed. Notes on Zope migration Migration from Zope 2.8.7 or 2.9.5 to Zope 2.10.x is mandatory but Plone 3 does not run natively on Zope 3. If you are upgrading from Zope 2.8.7 and you have a separate Five product you need to delete the Five product from your product directory before your upgrade. Zope 2.10.x requires Python 2.4.3+ (Python 2.4.2 is still acceptable). Also mandatory is Python Imaging Library 1.1.5 or newer, Python ElementTree. Caching * Caching related changes required (or maybe none!) 6.5.2. Updating add-on products for Plone 3.0 Plone 3.0 ships with new releases of Zope, CMF and Archetypes. 
When any framework updates, some things will be removed or changed. This is a list of the most common updates that need to be applied by product authors to ensure that their products work on Plone 3.0. 6.5.2.1. General product development and migration tips for Plone 3.0 Before we get started on the specific tips for how to update your product to work with Plone 3, let's mention some general recommendations that might save you time when updating your product in the next versions of Plone (3.5 and 4.0). Depending on your product, it might be hard to include compatibility for both Plone 2.5 and Plone 3.0 in the same product. There are several reasons for this, but the main ones are: - The workflow definition standard in CMF has changed - The new portlet infrastructure (although it does support old-style portlets, performance will suffer) - The introduction of viewlets as the main way to render content fragments in the layout So, the general recommendation is: - If your product is more complex than a simple type, create two releases — one for Plone 2.5 and one for Plone 3.0. - If you used ArchGenXML to create your product, you should be able to regenerate your product from the UML model to get a Plone 3.0-compatible version. Tip - To further future-proof your product (for Plone 3.5 and 4.0), try the following: - Start Zope in debug mode using zopectl fgand use your product normally. Check if it outputs any deprecation warnings to the log window. - Disable the plone_deprecatedskin layer and make sure your application still runs without it (this disables deprecated methods and deprecated CSS styles) Other recommendations and suggestions - You can use the contentmigration product to write migrations for your own products. More information on this product can be found in the RichDocument tutorial - A lot of the new components use Zope 3 views instead of templates. These can be customized through-the-web using the portal_view_customizationstool. - Do not ever rely on the JS libraries in Plone being the same across releases. Use the KSS abstractions, the underlying implementation might (and will!) change. These things are not mandatory yet, but represent best-practice recommendations that will save you from updating these parts in the future: - QuickInstaller-based installation should use GenericSetup profiles instead - use events instead of manage_ methods (which will probably disappear in plone 3.5 or 4.0) - Packaging technology: - Use python packages instead of Zope products - Ship packages as eggs and register them with the Python Cheese Shop - Use Python Paste to create new packages 6":. 6.5.2.3. Transaction module is no longer implicitly in Archetypes In Archetypes 1.3 and 1.4, we imported transaction in the main module to work around a Zope 2.7 issue. Since Zope 2.7 is no longer a supported release, this is no longer the case in Archetypes 1.5 (which is what ships with Plone 3.0). Here's how to update your code. Typical error message when starting Zope: from Products.Archetypes import transaction ImportError: cannot import name transaction Archetypes no longer imports transaction, so you will have to do it in your own module now, if you are using it. Change occurences of: from Products.Archetypes import transaction to: import transaction For a live example, see Poi changeset 40594. 6.5.2.4. get_transaction module rename Zope has changed their syntax for getting transactions, and it has been deprecated in the the previous Zope releases for a while now. 
Zope 2.10.x (which is what Plone 3.0 runs on) removes the old syntax, so you have to update your code accordingly. Here's how. Typical error message: NameError: global name 'get_transaction' is not defined Just to show you a complete traceback of how this might look, here's the full thing as seen in a typical product install, where it is common to use subtransactions (for completeness and search engines): 2007-04-12 23:12:01 ERROR Zope.SiteErrorLog Traceback (innermost last): Module Products.CMFQuickInstallerTool.QuickInstallerTool, line 381, in installProduct - __traceback_info__: ('Poi',) Module Products.ExternalMethod.ExternalMethod, line 231, in __call__ - __traceback_info__: ((<PloneSite at /nu>,), {'reinstall': False}, (False,)) Module /Users/limi/Projects/Plone/3.0/Products/Poi/Extensions/Install.py, line 65, in install NameError: global name 'get_transaction' is not defined /Users/limi/Projects/Plone/3.0/Products/CMFQuickInstallerTool/QuickInstallerTool.py:409: DeprecationWarning: This will be removed in ZODB 3.7: subtransactions are deprecated; use sp.rollback() instead of transaction.abort(1), where `sp` is the corresponding savepoint captured earlier transaction.abort(sub=True) To update this, replace: get_transaction().commit(1) with: transaction.commit(1) (keep the (1) part if it already exists in the code, omit it otherwise) You might have to add an import transaction statement at the top of your file if you haven't imported it already. For a live example, see the Install.py part of Poi changeset 40594. 6.5.2.5. ContentFactoryMetadata deprecation CMF deprecated this call a while back, and Plone 3.0 is the first version that ships without this. Here's how to update your product to use the new syntax. Typical error message: Error Type: exceptions.ImportError Error Value: cannot import name ContentFactoryMetadata What causes this? Somewhere in your code, you have something like: from Products.CMFCore.TypesTool import ContentFactoryMetadata Update this to: from Products.CMFCore.TypesTool import FactoryTypeInformation instead, and you should be good to go. This change should work all the way back to Plone 2.1. For a live example, see DataGridField changeset 7901. 6.5.2.6. Update your workflows to use GenericSetup profiles To install workflows in Plone 3.0, you have to make use of CMF's GenericSetup profiles. Installing workflows in any other way is not supported, unfortunately — there are architectural changes in CMF that cannot support both approaches at the same time. Installing workflows via GenericSetup will make your product work only on Plone 2.5 and upwards, so make sure you create a special release/branch if you want your product to still work on Plone 2.1/2.0 (which are unsupported releases when Plone 3.0 is released). Typical error message that indicates that you are trying to install workflows not using GenericSetup: ImportError: cannot import name addWorkflowFactory For existing workflows, the easiest way to make the product install use GenericSetup for workflows is: - Install your product (and its workflows) using Plone 2.5. - Using the portal_setuptool in the ZMI, export a snapshot of the current site profile: - Click the Exporttab. - Select the parts you want to export the configuration for (in this case, Workflow Tool). - Click the Export Selected Stepsbutton. - You will now get a tar file named something like setup_tool-20070424225827.tar. 
- Unpack the tar file, and put the resulting files and directories in a directory profiles/default/in the root of your product. - Remove the workflow directories in workflow/that are not part of your product, and edit workflows.xmlso that it only has the information for your workflows. See Poi changeset 41071 for an example. - Delete your old '.py'-based workflow definitions in Extensions, but make sure you keep any workflow scripts, since these will be referenced from the profile definitions. - Add a configure.zcmlfile in the root of your product that registers the default profile. See Poi's configure.zcml for an example. - Remove the redundant code from Extensions/Install.pyand add the boilerplate code to invoke the GS setup, see Poi changeset 41071 for an example. This process is also the same for any code you want to move to GenericSetup, in the Poi example, we also moved the catalog metadata and various other things to use GenericSetup profiles, and could get rid of most of Install.py in the process. 6.5.2.7. Searching users/groups via the Membership tool is deprecated Searching for users and groups using the portal_membership and portal_groups tools has been deprectaed. Please use the search features of PAS directly or the PlonePAS pas_search browser view. XXX Need examples of this. (Wichert) 6.5.2.8. Portlets have a new infrastructure In Plone 3.0, portlets are no longer simple page templates, but objects with behaviour, logic and possibilities for advanced behaviour like per-portlet caching. Portlets have been re-implemented using the Zope 3 component architecture. Change custom portlets to use plone.app.portlets if possible. Old portlets are supported via a fallback mechanism called Classic Portlet; the portlet management screen has functionality for doing inline migration for old portlets. Note that using the old portlets mechanism will affect your site performance negatively. XXX needs examples. (Martin?) 6.5.2.9. main_template now uses Zope 3 viewlets Plone 3 has switched to use Zope 3 viewlet components instead of the old macro include approach. Any customizations of main_template.pt or header.pt will need to be updated to use the new approach. If have previously shipped customized versions of templates like header.pt, viewThreadsAtBottom.pt or global_contentmenu.pt to get things into the page, please switch to viewlets instead, as it makes it much easier for multiple products to co-exist without stepping on each others changes. Documentation and examples can be found in this tutorial. 6.5.2.10. Plone 3 does not create member folders by default With release 3.0, member folders are optional, and not created by default. This means that you can't rely on member folders to store data in or in any other way assume that there will be a members folder present. While this was always considered bad practice, it's now official. Don't do it. :) 6. 6.5.2.12. Document Actions now use Zope 3 viewlets If you were modifying or shipping custom templates for the document actions area of a Plone page, now's the time to stop. The new approach uses viewlets, and its default position has also been moved to the bottom of the page. It also defaults to a text-based representation instead of the icons that it was using earlier, since document actions are often too abstract to create good icons for. 6.5.2.13. 
Products installing workflows may need to add permissions If your product wants to make use of the new "Editor" role that ships with Plone 3, you will have to add explicit permissions to any workflows you add. The new "Editor" (aka. "Can Edit" on the Sharing page) in Plone 3.0 makes it easy to let people collaborate on content authoring. In some cases, editing also means the ability to add new objects inside the object people are editing. For this to work, third party content types that add custom workflows will have to either use one of the standard "add content" permissions or explicitly give Editor the Add portal content role. See Ticket #6265 for the changeset and full explanation. 6.5.2.14. Indexes declared in Archetypes schemata need to be moved to GenericSetup If you have declared indexes or metadata directly on the Archetypes field declarations, and you are using GenericSetup to install your types/FTIs, you will need to move them to GenericSetup. This applies if you have moved from using install_types() in Extensions/Install.py, to installing new content types/FTIs with GenericSetup using a types.xml import step. For each field that specifies an index, like this example from PoiIssue.py r40594 : StringField( name='issueType', index="FieldIndex:schema", widget=SelectionWidget( label="Issue type", description="Select the type of issue.", label_msgid='Poi_label_issueType', description_msgid='Poi_help_issueType', i18n_domain='Poi', ), enforceVocabulary=True, vocabulary='getIssueTypesVocab', required=True ), …you need to move the creation to catalog.xml with GenericSetup. If there is index="FieldIndex", that means you need a new index, of type FieldIndex, with the name being the name of the accessor method: <index name="getIssueType" meta_type="FieldIndex"> <indexed_attr value="getIssueType"/> </index> If there is also :schema or :metadata, e.g. index="FieldIndex:schema", you also need a metadata column: <column value="getIssueType"/> This is necessary because the schema does not really exist at install time, so there is no way GenericSetup can inspect it and configure new indexes. This was a bad design from the start, as portal-wide indexes do not belong in type-specific schemata anyway. 6.5.2.15. The "Sharing" tab is now a global action You should no longer have a 'sharing' action in the portal_types entry for a custom content type The "Sharing" tab now points to the @@sharing view, and is defined as a global action in the object category. If you have a custom content type and you have set up the local_roles action, which would normally be pointing to the folder_localrole_from template, you should remove it. It will be removed from existing, installed types during migration. If you do not remove the action, the user will see two "Sharing" tabs. For an example of the canonical set of actions and aliases, see the GenericSetup definition of the Document FTI. Of course, you may not need the References, History or External Edit actions in your own types. 6.5.2.16. Multi page schemas By default, Archetypes fields in different schemas in Plone 3.0 will be loaded all at once, without page reloads between the 'schematas'. In Plone 3.0, all fields from all schematas will be loaded at once. If you depend on your schematas (fieldsets) to be processed one page after the other, you'll need to mark your Archetypes content type that uses it (not the schema itself) with the IMultiPageSchema interface. The interface lives in Products.Archetypes.interfaces.IMultiPageSchema. 
The code to mark your content type would look like this: from zope import interface from Products.Archetypes.interfaces import IMultiPageSchema # ... interface.classImplements(MyContentType, IMultiPageSchema) 6.5.2.17. Enable inline editing (aka. QuickEdit) Once you have your product updated, you might want to add support for inline editing of your type. Fortunately, this is very easy. Adding inline editing and validation support to your view templates is as easy as calling the Archetypes widgets in view mode. As an example, consider the following typical code from Plone 2.5: <h1 tal: Variable goes here </h1> Now, to render the same thing, with an h1 tag and a class on it, you do: <h1 class="documentFirstHeading"> <metal:field Variable goes here </metal:field> </h1> This will keep whatever tags and styling you want around the item, and render the inline editing inside of it. It's also backwards compatible with earlier Plone versions — although these don't get the inline editing, obviously. 6.5.3. Updating 2.5.3 to 3.0.3 Specific steps (for review) on updating a Plone 2.5.3 site to 3.0.3 on Linux using the Universal Installer package. 6.5.3.1. Migration Procedure Specific steps (for review) for migrating a Plone 2.5.3 site on Linux to 3.0.3. These steps assume a previous 2.5.3 installation in the folder /var/plone/, which should be modified if necessary to suit your environment. - Download and un-archive the Plone 3 universal installer package for Linux. - Modify the install.sh script to point the PLONE_HOME variable to /var/plone/ - In the existing Plone site, take note of any non-Plone products that need to be moved to the upgraded instance. It is advisable to un-install any non-essential third-party products before migrating to a new version. In most cases, products are the biggest obstacle to migrating a site, and weeding out unnecessary products can save a great deal of time and frustration. These products can be re-installed as new packages after migration. It also seems necessary in some cases to remove installed caching objects (CacheFu), uninstall the caching products, and install new versions of the products and create new caching tools after migrating. - As the root user (or with "sudo"), shut down the existing Plone/Zope/Zeo cluster: /var/plone/zeocluster/bin/shutdowncluster.sh - Move /var/plone/ to a backup folder, such as /var/plone253/ - Run the Plone 3 install.sh script with the "zeo" cluster option: ./install.sh zeo - Start the new cluster: /var/plone/zeocluster/bin/startcluster.shThis can take some time, as a new Plone site is now created as part of the process. - Log into the ZMI as the "admin" user, using the password specified in /var/plone/zeocluster/adminPassword.txt: Once logged in, you may want to change the admin password to something more memorable (yet still secure) for future use: - Stop the new cluster: /var/plone/zeocluster/bin/shutdowncluster.sh - In /var/plone/zeocluster/server/var/, create a backup/ folder, and move all existing contents to this new folder: cd /var/plone/zeocluster/server/var/ mkdir backup mv Data.fs* backup/ Note that this step isn't completely necessary: you could just delete the existing files, but it's nice to back-up a working configuration in case things go wrong later. - Copy Data.fs from the old instance to the new installation, and ensure the permissions are correct: cp /var/plone253/zeocluster/server/var/Data.fs . 
chown plone:plone Data.fs - Start the new cluster: /var/plone/zeocluster/bin/startcluster.sh - Log into the ZMI as the "admin" user: Note: this step is here presently only for the purpose of a full procedure review: it may be bug-related and should not be performed as part of a base migration. Try this only if all else fails.In the ZMI, at the Plone site root, delete the following objects: - content_type_registry - mimetypes_registry - portal_transforms Note: this step is here presently only for the purpose of a full procedure review: it may be bug-related and should not be performed as part of a base migration. Try this only if all else fails.At the site root, using the Add pull-down, add new versions of the Content Types Registry, MimetypesRegistry Tool, and PortalTransforms Tool (in that order). - At the site root, click portal_migration, and in the Upgrade tab, click the Upgrade button. - After upgrading the site, click the View tab to test the main page. - Click Site Setup, and then click Add/Remove Products. - Under Installed Products, click the Migrate button to re-install any necessary existing products (in my case, this was CMFPlacefulWorkflow and Marshall). - Download and un-archive any required products to /var/plone/zeocluster/Products Make sure the product directories are complete, and that all contents have the proper owner ("plone"). - Re-start the cluster. - In Site Setup on the Plone site, in Add/Remove Products, install the new products. 6.6. Upgrading from 3.x to 3.2 Plone 3.2 is the first fully egg-based Plone release..x installation will also require some slight changes in the buildout configuration file for those who have already been using buildout configuration management in the 3.x series. Plone's installers take care of all this for you, but if you aren't using one of the installers you'll need to learn buildout — a Python configuration management tool we highly recommend — or use the Python package installer, easy_install, to install Plone. Both methods are discussed below. Windows Updates. Buildout All Plone's current installers use Buildout for configuration management. You should too, unless you're very experienced with Python packages. Buildout is the de facto standard for deploying Zope applications in a repeatable and easy way. The description of what will be installed is defined by a buildout configuration file, buildout.cfg. If you're upgrading using buildout for the first time, take a look at General advice on updating from a non-buildout to buildout-based installation. If you're updating an existing buildout, please note that the buildout files for 3.2.x look slightly different to those for 3.0 and 3.1 – they don't need a custom plone installation step as buildout can now handle it directly, here's an example of the relevant parts of buildout.cfg: [buildout] # parts: note that the plone part is no longer necessary. parts = zope2 instance ... Any other parts you've been using except "plone" # find-links: only the new dist.plone.org URL is needed. find-links = # New: this will pick up version settings for all the components. # Modify the "3.2.x" to match the version you're seeking, e.g., 3.2.2. extends = versions = versions # eggs: Plone is now specified in the egg section. All the # dependencies are automatically handled. eggs = Plone # zope part: Note the new fake-eggs settings. This is required # for Zope dependencies to be resolved during buildout. 
[zope2] recipe = plone.recipe.zope2install url = ${versions:zope2-url} fake-zope-eggs = true additional-fake-eggs = ZConfig ZODB3 pytz # Everything else can usually be the same. [instance] recipe = plone.recipe.zope2instance zope2-location = ${zope2:location} ... # remove any reference to the plone part: e.g., ${plone:eggs} or ${plone:products} If you have already modified your buildout.cfg file, for example to install new add-ons, remember to copy what you added to the eggs = and zcml = lines into the [instance] section. If you've installed "old style" products you'll need to copy the productdistros section and add it to parts too. After doing this, run bin/buildout -n, and your instance should update itself. Old buildouts There's been a recent change to the fake eggs mechanism that may cause a buildout error unless you delete the "develop-eggs" folder (or just its contents) from your buildout folder. It'll be recreated. Custom buildout To convert your existing custom buildout to Plone 3.2.x is very easy. The above example should be enough to make it clear what's needed, but in summary: Remove the [plone] section and its entry from parts =. Also, remove all existing ${plone:...} references, including the ones inside the [zope2] and [instance] parts. - Add the Plone egg to the eggs specification. Note that "Plone" is capitalized. - Copy the extends = and versions = directives from above into your buildout, updating the version number to the target release. - Modify the dist.plone.org line in find-links to match the version, as above. - Add the two "fake-eggs" specifications above to the zope part specification. easy_install and virtualenv If you have special reasons for using a different or no python package manager you can install Plone via easy_install alone. If you choose this route we highly recommend that you use virtualenv to create an isolated Python instance before proceeding. Python libraries — and different versions of the same library — often conflict. Plone is built on-top of the Zope application server and requires it to be installed for you to use Plone. You can install Plone directly into a python environment using the easy_install utility. easy_install Plone If you have multiple versions of Python installed you will need to use the easy_install that points to the same Python as your custom Zope install. Version migration No matter which technique you use to ugrade your Plone version, you'll need to use the portal_migrations tool in the Zope Management Interface to update your object database. This step is unchanged from past installations; see the general procedure. A word on warningsWhenever. 7. General advice on updating from a non-buildout to buildout-based installation Some hints for those stepping onto the buildout bandwagon. Beginning with Plone 3.2, we're no longer distributing Plone in the traditional tarballs (archive files) of Zope products. Instead, Plone is distributed as a set of Python Packages. These packages bear information about dependencies, and they generally provide us with a much better way of managing a complex web of Python, Zope and Plone components. Buildout, a sophisticated configuration management system from the creator of Zope, is now the recommended way for managing Plone installations. This poses a one-time challenge for folks upgrading from old to new-style installs. It should, though, make future updates much easier. The Managing projects with Buildout tutorial provides a great introduction to buildout and its use. 
Here, we'll just offer a few hints on making your move to buildout as painless as possible.

- Give up any idea of doing an in-place update. Many of us got into the habit with earlier versions of Plone of simply unpacking the tarball for a new version into the "Products" directory of the old install. That was never a good idea for a major version update, and it's just not feasible while trying to switch to buildout. The internal layout of the files has just changed too much. Changing to buildout will make it much easier, though, to upgrade in place in the future.
- Install a new, buildout-based Plone version to a different place than your old installation. Different path, different drive, different server, different hosting facility: whichever you need.
- Use a current Plone installer if available (all installers for 3.2+ are buildout-based):
  - If you're using Linux/FreeBSD/*nix, please strongly consider using the Unified Installer. If you didn't like something about the way it worked for 2.x, please take a look again. It's a lot more versatile. It includes options to change the target directory, do ZEO or stand-alone installs, and use an already installed Python.
  - If you're using Darwin on a production server, it's a good idea to install the XCode tools and use the Unified Installer. You'll want the versatility.
  - If you're using OS X on a workstation, it's fine to use the OS X installer, which is meant to be convenient.
  - If you're on Windows, use the Windows installer or prepare to learn a lot.
- If you don't want to use an installer, that's OK, but protect your system Python. Learn to use virtualenv, which will allow you to create isolated Python sandboxes. Install virtualenv first, create a sandbox, then use easy_install in the sandbox to install ZopeSkel. Follow the buildout tutorial's instructions for creating your buildout.
- Fire up your new installation and make sure it's working. Try it out with an empty database. If you're using it on the same server, you should adjust the ports first to make sure you're not trying to use the same TCP/IP ports. This is a common error. Look for the "http-address" setting in your buildout.cfg file. If you've used the Unified or OS X installers, it's even easier, as the port settings are at the top of the file.
- Evaluate your add-on product list. Enumerate all the add-on Zope and Plone products installed on your own server. Divide the list into those that have egg (Python Package) updates available and those that don't.
- Copy the add-on products that don't have egg versions from the "Products" directory of the old install into the "products" directory (note the small "p") of your new install. Check ownership and permissions of the copied files (failure to do this is another common error).
- Add the names of new, egg-based products to the eggs = lines of your buildout.cfg. Check the install instructions to see if they also need a ZCML slug specification. Re-run buildout to fetch and install the new eggs.
- Start your new install in foreground mode (bin/plonectl fg or bin/instance fg) to watch product loading and discover errors. Fix product problems until you have a clean start (see the command sketch after this list).
- Copy the Data.fs file from your old install's var directory to the new one's var/filestorage directory. Check ownership and permissions!
- Do the foreground start dance again. Solve problems.
- Go live.
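To make the buildout, foreground-start, and Data.fs steps above concrete, here is a minimal command sketch. The /var/plone/old and /var/plone/new paths are illustrative assumptions rather than part of the guide; substitute your actual install locations, and use bin/plonectl fg if that is the control script your installer created.

# Re-run buildout non-interactively to fetch and install the configured eggs.
cd /var/plone/new
bin/buildout -n

# Start in foreground mode to watch product loading and catch errors.
bin/instance fg

# Once products load cleanly, copy the old object database into the new
# buildout, fix its ownership, then do the foreground start again.
cp /var/plone/old/var/Data.fs /var/plone/new/var/filestorage/Data.fs
chown plone:plone /var/plone/new/var/filestorage/Data.fs
bin/instance fg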
http://plone.org/documentation/manual/upgrade-guide/referencemanual-all-pages
crawl-002
en
refinedweb
RE: Issue With Programmatically Impersonating a User in a Web-Part
- From: Joe <Joe@xxxxxxxxxxxxxxxxxxxxxxxxx>
- Date: Wed, 8 Nov 2006 14:19:02 -0800

It just occurred to me that you may also be having an issue with the security context under which your smart part is running. I'd recommend following Jan Tielen's advice in the article regarding custom code policy files, if you haven't already done so.

"Joe" wrote:

I'd start by making some changes to your impersonation code. Yours looks a little more complicated than it has to be, plus you've got application logic mixed in there with it. Try this code instead.

To start impersonating the SharePoint domain service account:

WindowsIdentity objOriginalUser = WindowsIdentity.GetCurrent();
RevertToSelf();
WindowsIdentity.GetCurrent().Impersonate();

To stop:

objOriginalUser.Impersonate();

To start impersonating a specific account:

WindowsImpersonationContext wic = CreateIdentity(ACCOUNTNAME, DOMAIN, PASSWORD).Impersonate();

To stop:

wic.Undo();

And you'll need this code to call that previous code (a sketch of the CreateIdentity helper follows this message):

using System.Security.Principal;
using System.Runtime.InteropServices;

#region Impersonation code
[DllImport("advapi32.dll")]
static extern bool RevertToSelf();
#endregion

"ptranfa@xxxxxxxxx" wrote: Administrator privileges, the impersonation fails (kinda the whole point for impersonation..). Any help would be MUCH appreciated.
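The CreateIdentity helper referenced above is not shown in the archived message. As a rough sketch of what such a helper commonly looks like on the .NET Framework of that era (an assumption, not a reproduction of the poster's code), it wraps the Win32 LogonUser call and builds a WindowsIdentity from the resulting token:

using System;
using System.Runtime.InteropServices;
using System.Security.Principal;

// Hypothetical reconstruction; the original poster's helper may differ.
static class ImpersonationHelper
{
    [DllImport("advapi32.dll", SetLastError = true)]
    static extern bool LogonUser(string userName, string domain, string password,
                                 int logonType, int logonProvider, out IntPtr token);

    [DllImport("kernel32.dll", SetLastError = true)]
    static extern bool CloseHandle(IntPtr handle);

    const int LOGON32_LOGON_INTERACTIVE = 2;
    const int LOGON32_PROVIDER_DEFAULT = 0;

    // Logs the account on and returns an identity whose Impersonate() call
    // switches the current thread to that account, as in the snippet above.
    public static WindowsIdentity CreateIdentity(string account, string domain, string password)
    {
        IntPtr token;
        if (!LogonUser(account, domain, password,
                       LOGON32_LOGON_INTERACTIVE, LOGON32_PROVIDER_DEFAULT, out token))
        {
            throw new InvalidOperationException(
                "LogonUser failed with error " + Marshal.GetLastWin32Error());
        }
        try
        {
            // WindowsIdentity duplicates the token, so the original can be closed.
            return new WindowsIdentity(token);
        }
        finally
        {
            CloseHandle(token);
        }
    }
}

With a helper like this in place, CreateIdentity(ACCOUNTNAME, DOMAIN, PASSWORD).Impersonate() and wic.Undo() work as shown in the message.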
http://www.tech-archive.net/Archive/SharePoint/microsoft.public.sharepoint.portalserver.development/2006-11/msg00073.html
crawl-002
en
refinedweb
Re: Class export by regular DLL (vc++) be used in vb
From: Ralph (msnews.20.nt_consulting32_at_spamgourmet.com)
Date: Tue, 7 Dec 2004 08:51:14 -0600

"polybear" <poly@mega> wrote in message news:...
>
> I had a simple class exported by a regular DLL written in VC++,
> and other development tools like VB are going to use it.
> But I don't know how to reference/declare the class in VB.
>
> Can I use this class in VB directly, or should I pack this class
> into ActiveX or COM object format and then reference it in the VB IDE?
>
> What is the difference between ActiveX and COM in usage?

A little confused by your question, as you appear to be referring to two separate entities.

A 'regular' DLL refers to a dynamic library which exports functions (using the WINAPI convention). There are two basic methods to use these functions in your VB app: 1) use the Declare Function directive to declare them in your VB code, or 2) create a type library for the DLL and reference that in your app. Of the two, IMHO, a type library is the only way to go, unless you are only going to import a few functions. (Using the Declare statement adds to the size of a program, is mildly slower than using a type library, and can lead to subtle errors if you aren't consistent.)

But then you mention "classes". There is no simple way to use (import) a C++ class (native or MFC) in a VB application. The easiest and sanest way is to wrap the C++ classes with an ActiveX component (perhaps using ATL) and import that into your VB app. Perhaps even rewrite the original DLL as an ActiveX component (assuming you have the source code). So what do you really have - a 'regular' DLL or what?

The practical difference between "ActiveX" and "COM" is just a matter of names and context. COM is a protocol outlining a binary standard for interprocess communication and sharing of services. The actual engine to implement COM is OLE. Back in the old days (VB1-4, 16-bit) the various components were called "OLE" servers/containers/controls, etc. The M$ marketing started calling them "ActiveX Components". We very seldom ever work with COM directly; instead we use various implementations, but in general anything based on COM is called "COM". It can get even more confusing when programmers refer to "COM+" as simply "COM". COM+ is actually a collection of advanced COM runtime implementations that is embedded in the OS to provide additional services.

hth
-ralph
http://www.tech-archive.net/Archive/VB/microsoft.public.vb.general.discussion/2004-12/0995.html
crawl-002
en
refinedweb
Vol. 12, Issue 4, 997-1007, April 2001 Institut für Biochemie, Technische Universität Graz, AustriaSubmitted September 15, 2000; Revised January 3, 2001; Accepted January 24, 2001 Three different pathways lead to the synthesis of phosphatidylethanolamine (PtdEtn) in yeast, one of which is localized to the inner mitochondrial membrane. To study the contribution of each of these pathways, we constructed a series of deletion mutants in which different combinations of the pathways are blocked. Analysis of their growth phenotypes revealed that a minimal level of PtdEtn is essential for growth. On fermentable carbon sources such as glucose, endogenous ethanolaminephosphate provided by sphingolipid catabolism is sufficient to allow synthesis of the essential amount of PtdEtn through the cytidyldiphosphate (CDP)-ethanolamine pathway. On nonfermentable carbon sources, however, a higher level of PtdEtn is required for growth, and the amounts of PtdEtn produced through the CDP-ethanolamine pathway and by extramitochondrial phosphatidylserine decarboxylase 2 are not sufficient to maintain growth unless the action of the former pathway is enhanced by supplementing the growth medium with ethanolamine. Thus, in the absence of such supplementation, production of PtdEtn by mitochondrial phosphatidylserine decarboxylase 1 becomes essential. In psd1 strains or cho1 strains (defective in phosphatidylserine synthesis), which contain decreased amounts of PtdEtn, the growth rate on nonfermentable carbon sources correlates with the content of PtdEtn in mitochondria, suggesting that import of PtdEtn into this organelle becomes growth limiting. Although morphological and biochemical analysis revealed no obvious defects of PtdEtn-depleted mitochondria, the mutants exhibited an enhanced formation of respiration-deficient cells. Synthesis of glycosylphosphatidylinositol-anchored proteins is also impaired in PtdEtn-depleted cells, as demonstrated by delayed maturation of Gas1p. Carboxypeptidase Y and invertase, on the other hand, were processed with wild-type kinetics. Thus, PtdEtn depletion does not affect protein secretion in general, suggesting that high levels of nonbilayer-forming lipids such as PtdEtn are not essential for membrane vesicle fusion processes in vivo. The zwitterionic phospholipid phosphatidylethanolamine (PtdEtn) has a strong tendency to form nonbilayer structures and is the most abundant phospholipid of this type in eukaryotic cells (reviewed by de Kruijff, 1997 ). The potential of membranes with high PtdEtn content to undergo laminar-hexagonal phase transition has been proposed to affect membrane-membrane contact and bilayer fusion during processes of vesicle formation and vesicle-mediated protein trafficking. In addition, nonbilayer lipids may affect integration of proteins into membranes, their lateral movement within the membrane, and folding and stabilization of certain membrane protein complexes. The most prominent biological system that has provided both genetic and biochemical evidence for specific roles of PtdEtn in cell function is Escherichia coli (reviewed by Dowhan, 1997 ). In this prokaryote, lack of PtdEtn can be compensated by elevated levels of cardiolipin (CL) in the presence of divalent cations, thereby maintaining the potential of bilayer-to-nonbilayer phase transition of membranes (Morein et al., 1996 ). A PtdEtn-deficient E. coli mutant displays complex phenotypic changes, including filamentous growth (Mileykovskaya et al., 1998 ) and decreased activity of lactose permease. 
The latter observation was ascribed to misfolding of the permease due to lack of PtdEtn that acts as a molecular chaperone for this transporter (Bogdanov et al., 1999 ). In vitro, nonbilayer lipids stimulate the activity of the reconstituted bacterial protein translocase (van der Does et al., 2000 ). Biosynthesis of PtdEtn in Saccharomyces cerevisiae can be accomplished by two de novo pathways of phosphatidylserine (PtdSer) formation and decarboxylation and by the cytidyldiphosphate (CDP)-ethanolamine branch of the Kennedy pathway (Figure 1). In this organism, PtdEtn is synthesized primarily by the two de novo pathways (reviewed by Daum et al., 1998 ). Decarboxylation of PtdSer by phosphatidylserine decarboxylase 1 (Psd1p) occurs in the inner mitochondrial membrane (Zinser et al., 1991 ), whereas phosphatidylserine decarboxylase 2 (Psd2p) was localized to a Golgi/vacuolar compartment (Trotter and Voelker, 1995 ). Methylation of PtdEtn by PtdEtn methyltransferases 1 (Pem1p) and 2 (Pem2p) yields phosphatidylcholine (PtdCho), the final product of the de novo route of aminoglycerophospholipid synthesis. Ethanolamine (Etn) or choline (Cho) exogenously added to a yeast culture or endogenously formed through lipolytic processes is used for PtdEtn or phosphatidylcholine (PtdCho) synthesis via the Kennedy pathway. The initial enzymes of this branched pathway, ethanolamine kinase (Eki1p) and choline kinase (Cki1p), have overlapping substrate specificities with Eki1p being primarily responsible for Etn phosphorylation and Cki1p for Cho phosphorylation (Kim et al., 1999 ). Both gene products together represent the total cellular ethanolamine and choline kinase activities in S. cerevisiae. Ethanolamine phosphate (Etn-P) and choline phosphate (Cho-P) are activated by reaction with cytidyltriphosphate (CTP), and cytidyldiphosphate ethanolamine (CDP-Etn) and cytidyldiphosphate choline (CDP-Cho) are finally linked to diacylglycerol to yield PtdEtn and PtdCho. A cpt1 ept1 double mutant, which is defective in the final steps of this pathway, is viable, suggesting that in yeast the Kennedy pathway is not essential under standard growth conditions (McGee et al., 1994 ). The Kennedy pathway is linked to sphingolipid catabolism through a reaction catalyzed by dihydrosphingosine-phosphate lyase (Dpl1p). This enzyme cleaves phosphorylated sphingoid base to long chain aldehyde and ethanolaminephosphate (Etn-P) (Saba et al., 1997 ) allowing incorporation of the latter component into PtdEtn through the Kennedy pathway (Mandala et al., 1998 ). This finding is consistent with the observation of Hikiji et al. (1988) that cho1 cells, which are defective in phosphatidylserine synthase, accumulated some PtdEtn on choline-supplemented media. Yeast PtdSer is synthesized from cytidyldiphosphate diacylglycerol (CDP-DAG) and serine (Ser) by the action of PtdSer synthase Cho1p (Figure 1), which is localized to the endoplasmic reticulum (reviewed by Daum et al., 1998 ). Mutants deleted of CHO1 do not contain detectable amounts of PtdSer and are auxotrophic for Cho or Etn, indicating that Cho1p is the only PtdSer synthase in yeast and that PtdSer is not essential (Atkinson et al., 1980 ). Although PtdSer-deficient yeast cells are viable, they exhibit a number of defects such as decreased tryptophan transport activity (Nakamura et al., 2000 ) and abnormal vacuolar function and morphogenesis (Hamamatsu et al., 1994 ). 
Mutants defective in either of the PtdSer decarboxylases, Psd1p or Psd2p, grow like wild-type on glucose medium, but psd1 psd2 double mutants are auxotrophic for Etn or Cho (Trotter and Voelker, 1995 ). The fact that cho1 and psd1 psd2 mutants can be rescued by Cho alone suggested that PtdCho is an essential lipid, and that PtdEtn is either nonessential or can be synthesized in adequate amounts from the Etn-P provided by sphingolipid breakdown. The essentiality of PtdCho was supported by the observation that strains defective in both methyltransferases, Pem1p and Pem2p, are auxotrophic for Cho (Summers et al., 1988 ; Kodaki and Yamashita, 1989 ); thus, PtdEtn alone does not fully substitute for the methylated phospholipids. The high level of PtdEtn in mitochondria (Tuller et al., 1999 ) and the presence of Psd1p in the inner mitochondrial membrane suggest a specific requirement of this organelle for PtdEtn. To investigate the contributions and relative efficiencies of the three pathways of PtdEtn synthesis described above, we 1) genetically dissected each of these pathways, 2) analyzed the specific role of mitochondrial PtdEtn production, and 3) studied the efficiency of PtdEtn import into mitochondria. We demonstrate that the requirement for PtdEtn is more stringent on nonfermentable than on fermentable carbon sources and that PtdEtn is imported into mitochondria only with moderate efficiency. Yeast Strains, Plasmids, and Culture Conditions Strains and plasmids used in this study are listed in Table 1. The open reading frames of PSD1, PSD2, and CHO1 were replaced by the KanMX4 marker by using a polymerase chain reaction (PCR)-mediated one-step (PSD1 and CHO1) or two-step (PSD2) gene replacement strategy (Wach et al., 1994 ). Positions 4 to 1162 of PSD1 (total length 1503 bp), positions 1 to 3427 of PSD2 (total length 3416 bp), and positions 4 to 828 of CHO1 (total length 831 bp) were replaced by using primers listed in Table 2. These constructs were used for transformation of the diploid wild-type strain FY1679 (Table 1). Upon tetrad dissection, the deletions showed 2:2 segregation as monitored by kanamycin resistance. Diploid and haploid deletion strains were tested for proper insertion of the KanMX4 marker by colony PCR with appropriate primers (Table 2). Double and multiple deletion mutants were then obtained by standard genetic methods and verified by colony PCR analysis. Plasmids pRB1 and pRB2 were isolated from YCp50 (Rose et al., 1987 ) and YEp24 (Carlson and Botstein, 1982 ) yeast genomic libraries by their ability to suppress the Etn-requirement of a psd1 psd2 double deletion strain. Plasmid pRB1 carries PSD1 and pRB2 contains PSD2 as verified by PCR and restriction mapping. Standard techniques of E. coli molecular biology were used throughout the work (Ausubel et al., 1996 ). Plasmids were introduced into yeast cells by lithium acetate transformation (Gietz et al., 1992 ). To confirm synthetic lethality of the psd1 psd2 cki1 triple mutant, the strain was tested for loss of plasmid pRB2 by cultivation on solid synthetic medium containing 1 mg/ml 5'-fluoroorotic acid (PCR, Gainesville, FL) and 5 mM Etn, Cho, or Ser. Yeast strains were grown under aerobic conditions at 30°C on YP medium (1% yeast extract, 2% bacto peptone) containing 2% glucose (YPD) or lactate (YPLac) as a carbon source. It has to be noted that YP media contain low amounts of Etn and Cho. 
Growth tests were performed on solid synthetic minimal medium (Sherman et al., 1986) containing 2% glucose, ethanol, or lactate and 2% Bactoagar (Difco, Detroit, MI). Supplemented media contained 5 mM Etn, Cho, or Ser unless otherwise stated. To study growth in liquid YP media, precultures grown to the stationary phase were diluted 1:500 (vol/vol) in fresh medium, and optical density at 600 nm was measured at the time points indicated. Respiration-deficient cells (petites) in YP medium were detected by serial dilution and plating an equal number of cells on YPD and YPLac medium. Cell Fractionation Total homogenates and mitochondria were prepared from spheroplasts by published procedures (Daum et al., 1982; Zinser et al., 1991). Relative enrichment of markers and cross-contamination of subcellular fractions were assessed as described by Zinser and Daum (1995). Analytical Procedures Lipids were extracted by the procedure of Folch et al. (1957). Individual phospholipids were separated by two-dimensional thin-layer chromatography on Silica gel 60 plates (Merck, Darmstadt, Germany), visualized by staining with iodine vapor, scraped off the plate, and quantified by the method of Broekhuyse (1968). Protein was quantified by the method of Lowry et al. (1951) using bovine serum albumin as a standard. Spectrophotometric quantification of mitochondrial cytochromes was carried out by the method of Watson et al. (1975) by using a Hitachi U2310 double beam spectrophotometer. Enzymatic activity of cytochrome c oxidase was measured as described by Mason et al. (1973) and that of cardiolipin synthase and phosphatidylglycerol phosphate synthase as described by Tuller et al. (1998). PtdSer decarboxylase activity was measured. Protein Secretion Analysis of Gas1p maturation was performed with homogenates of cells grown to the late logarithmic phase in minimal medium in the presence of supplements as indicated. Homogenates were prepared by disintegrating cells with glass beads in a Merkenschlager homogenizer under CO2 cooling in the presence of 10 mM Tris-HCl, pH 7.2, 1 mM phenylmethylsulfonyl fluoride (Calbiochem, La Jolla, CA). Western blot analysis by using a primary rabbit antibody against Gas1p was performed as described by Haid and Suissa (1983). Immunoreactive bands were visualized by enzyme-linked immunosorbent assay with a peroxidase-linked secondary antibody (Sigma, St. Louis, MO) following the manufacturer's instructions. Carboxypeptidase Y maturation was monitored by pulse-chase labeling and immunoprecipitation essentially as described by Munn et al. (1999). As minor modifications, cells were grown in synthetic minimal medium supplemented with 2% glucose and 5 mM Cho, and samples were taken during the chase period. Invertase secretion was assayed according to Munn et al. (1999) with the modification that cells were grown in synthetic minimal medium supplemented with 5% glucose and 5 mM Cho and induced by resuspension in synthetic minimal medium containing 0.05% glucose, 2% sucrose, and 5 mM Cho. Synthesis of PtdEtn Is Essential in Yeast To study the requirement for PtdEtn and the relative contribution of the different pathways to PtdEtn synthesis, a series of haploid single and multiple deletion strains with defects in the different biosynthetic routes of PtdEtn synthesis were constructed (Table 1) and tested for their growth phenotype on defined media containing different carbon sources (Table 3).
As recognized previously (Atkinson et al., 1980 ; Trotter et al., 1995 ) single deletions of PSD1 and PSD2 did not affect growth on glucose medium, but strains deleted in both PtdSer decarboxylases (psd1 psd2 ) or PtdSer synthase (cho1 ) were auxotrophic for either Etn or Cho. To study the effect of the Dpl1p-dependent salvage pathway on PtdEtn production, and to investigate whether this pathway is required for psd1 psd2 or cho1 mutants to grow on Cho-supplemented media, we analyzed the growth phenotype of a psd1 psd2 dpl1 triple deletion strain. The triple mutant was strictly auxotrophic for Etn and could not be grown by Cho supplementation alone (Table 3). Overexpression of Dpl1p from plasmid p24-3 (Table 1) in psd1 psd2 and cho1 strains relieved their requirement for Etn or Cho (Table 3). We conclude from these findings that a minimum of PtdEtn is required for growth on glucose, and that in psd1 psd2 or cho1 strains this pool of essential PtdEtn can be provided via Dpl1p-dependent sphingolipid catabolism. Requirement for PtdEtn Is More Stringent on Nonfermentable Than on Fermentable Carbon Sources In contrast to cultivation on glucose-containing media, a psd1 mutant strain failed to grow on media with lactate or ethanol as the carbon source unless it was supplemented with Etn, Cho, or Ser (Table 3). This defect was fully rescued by expression of Psd1p derived from the centromeric plasmid pRB1 (Table 1) in the psd1 background. Addition of Etn, Cho, or Ser to the medium improved growth of the psd1 mutant on nonfermentable carbon sources to some extent but not to the wild-type level. Whereas Etn and Cho supplementation should directly enhance PtdEtn and PtdCho formation through the Kennedy pathway, supplementation with Ser resulted in enhanced PtdSer synthesis as indicated by the rise of PtdSer from ~10 mol% of total phospholipids in psd1 psd2 grown on Etn- or Cho-supplemented medium to 17 mol% in cells grown on Ser-supplemented medium (our unpublished results). This elevated level of PtdSer might increase the substrate level for Psd2p. The possibility that enhanced serine palmitoyl transferase-dependent synthesis of sphingoid bases (Nagiec et al., 1994 ) and increased hydrolysis of phosphorylated sphingoid bases by Dpl1p accounted for the Ser auxotrophy was ruled out because deletion of DPL1 in the psd1 mutant background did not affect growth on lactate or ethanol in the presence of Ser (Table 3). Taken together, these results indicate that enhanced synthesis of PtdSer or increased formation of Ptd-Etn or PtdCho through the Kennedy pathway can rescue growth of psd1 mutant cells on nonfermentable carbon sources. In contrast, psd1 psd2 or cho1 mutants are strictly auxotrophic for Etn on nonfermentable carbon sources (Table 3), suggesting that their capacity to synthesize PtdEtn through Dpl1p-dependent turnover of sphingoid bases was not sufficient to fulfill the elevated requirement of PtdEtn under these conditions. Single deletions of EKI1 or CKI1, which encode the initial enzymes of the Kennedy pathway, did not affect growth of mutants deleted for PSD1 or PSD2 on glucose medium (Table 3). On lactate or ethanol containing medium, however, a psd1 eki1 double mutant was auxotrophic for Etn, Cho, or Ser, and a psd1 cki1 mutant strain was a strict Etn auxotroph (Table 3), indicating that Etn is more specifically used as a substrate by Eki1p than by Cki1p. Moreover, the psd1 cki1 dpl1 mutant grew on Etn-, and the psd1 eki1 dpl1 mutant grew on Etn, Cho, or Ser-supplementation (Table 3). 
As expected, a psd1 cki1 eki1 triple mutant failed to grow on Etn-, Cho-, or Ser-supplemented synthetic lactate or ethanol medium, because Psd2p and Dpl1p did not provide enough PtdEtn for cells grown on nonfermentable carbon sources. Thus, Psd2p in combination with either Eki1p or Cki1p was sufficient for maintaining the required pool of PtdEtn for cells grown on lactate or ethanol. In the absence of both Psd1p and Psd2p, Etn-utilization was more efficient by Cki1p than by Eki1p. A psd1 psd2 cki1 triple deletion was lethal (Table 3), whereas a psd1 psd2 eki1 triple deletion strain grew like a psd1 psd2 double mutant on lactate (except for a slightly higher requirement for Etn) and a psd1 psd2 eki1 dpl1 quadruple deletion mutant grew on fermentable and nonfermentable carbon sources supplemented with Etn. Thus, the function of Eki1p and Dpl1p in a psd1 psd2 cki1 triple mutant background was not sufficient for growth, whereas psd1 psd2 with intact Cki1p could grow if given appropriate supplementation. PtdEtn Level in Mitochondria Is Growth Limiting on Nonfermentable Carbon Sources To quantify the effects of mitochondrial and extramitochondrial de novo synthesis of PtdEtn, we followed growth of mutants with defects in PtdEtn synthesis in liquid YP media (Figure 2). Consistent with the observations made on defined solid media, more Etn was required on nonfermentable than on fermentable carbon sources for strains deficient in biosynthesis of PtdEtn. Etn supplementation of YPLac enhanced growth of psd1 psd2 and cho1 mutants to a level comparable to that of psd1 but not further. Growth of psd1 on YPLac was not further enhanced by Etn supplementation because YPLac contains low amounts of Etn and Cho. These data suggest that, in the absence of mitochondrial production of PtdEtn, namely, in psd1 or cho1 strains, transport of PtdEtn to mitochondria becomes growth limiting. Under these conditions, the increased requirement of PtdEtn cannot be satisfied by supply with extramitochondrially synthesized PtdEtn. This notion was confirmed by determining the phospholipid composition of isolated mitochondria of the various mutant strains grown on lactate with or without supplementation of 5 mM Etn (Table 4). This analysis revealed that the growth rate of the different strains on lactate correlated with the mitochondrial PtdEtn content. In a psd1 strain, mitochondrial PtdEtn was reduced to ~25% of the corresponding wild-type level; in psd1 psd2 and cho1 strains, PtdEtn was reduced even more dramatically to an apparently minimal level of 1-2%. This decrease of PtdEtn is compensated for by elevated levels of PtdCho, PtdSer, and phosphatidylinositol (PtdIns). Etn supplementation did not increase the PtdEtn content of mitochondria significantly, indicating that the low amount of PtdEtn available in the extramitochondrial space is not preferentially imported into mitochondria, but rather used for production of PtdCho through the methylation pathway. The predominant role of Psd1p in supplying mitochondria, relative to the total cell homogenate, with PtdEtn is apparent from the significant enrichment of PtdEtn in mitochondria in strains with intact PSD1 (Table 4). In contrast, mitochondria from psd1 , psd1 psd2 , or cho1 mutants have a PtdEtn level more comparable to that found in nonmitochondrial subcellular membranes. In mitochondria of psd1 and psd1 psd2 mutants, the CL content is also reduced to ~50% of the Etn-supplemented control (Table 4). 
To investigate whether the low PtdEtn content in mitochondria directly affects the activity of enzymes involved in CL biosynthesis, we performed in vitro enzyme assays for CL synthase and phosphatidylglycerophosphate (PtdGro-P) synthase. After growth on YPLac without Etn supplementation specific activity of PtdGro-P synthase was twice as high in mitochondria of psd1 psd2 (0.81 ± 0.04 nmol/min × mg protein) compared with the wild-type (0.44 ± 0.03 nmol/min × mg protein), whereas the specific activity of CL synthase was similar in mitochondria of both strains (0.081 ± 0.011 nmol/min × mg protein). Thus, the observed reduction of CL is not due to reduced activities of the enzymes involved in the biosynthesis of this phospholipid. PtdEtn Depletion Does Not Cause Obvious Damage to Mitochondrial Membranes To examine whether enzymes of the respiratory chain depend on the mitochondrial content of PtdEtn we measured the specific activity of cytochrome c oxidase in mitochondria of psd1 , psd1 psd2 , cho1 , and psd2 deletion strains grown on YPLac with or without Etn supplementation. In all mutants tested this enzyme activity was similar to wild-type (0.19 ± 0.01 µmol/min × mg). Furthermore, spectroscopic analysis displayed a similar content of cytochromes aa3, b, and c in all of these strains (our unpublished results). Finally, the mitochondrial protein pattern and morphology were not affected by PtdEtn depletion either. The latter observation was made by using the green fluorescent protein (GFP)-tagged mitochondrial matrix protein CoxIVp (kindly provided by R.E. Jensen, John Hopkins University of Medicine, Baltimore, MD), and the membrane potential-dependent fluorescent dyes MitoTracker CMXRos (Molecular Probes, Eugene, OR) and 4-(4-dimethylaminostyryl)-N-methyl-pyridinium iodide (Molecular Probes) as probes. Similarly, electron microscopic analysis did not show any obvious structural alterations of mitochondria or other organelles in the psd1 strain (our unpublished results). These results demonstrate that the mitochondrial defect of PtdEtn-depleted strains is not due to gross changes of their mitochondrial membranes. Reduced Level of PtdEtn Induces Formation of Respiration-deficient Cells (Petites) An increased rate of respiration-deficient cell (petite) formation is a common phenotype of various mutants defective in lipid biosynthesis. This phenotype was described for cho1 (Atkinson et al., 1980 ) and psd1 mutant strains (Trotter et al., 1993 ), suggesting that mitochondria are more sensitive to alterations of their phospholipid composition than other organelles. Our results presented here confirm and extend these observations. Cultures of cells lacking the capacity to form mitochondrial PtdEtn (psd1 , psd1 psd2 , and cho1 ) contained ~50% petites after 2 d of growth in YPD medium, irrespective of Etn supplementation. The observation that spontaneously produced petite cells are growth arrested on nonfermentable carbon sources but remain fully viable for up to 4 d, allowed us to determine the rate of spontaneous petite formation by counting the number of petite cells at two different time points. In YPLac medium ~10% of the cells lacking mitochondrial PtdEtn synthesis formed petites independent of growth phase and Etn supplementation (Table 5). Thus, during each cell division ~10% of psd1 , psd1 psd2 , and cho1 cells become respiratory deficient. 
After three to four rounds of subculturing these strains on glucose-containing medium (YPD), petites accumulated and the entire culture became respiratory deficient. As a control, we examined whether the poor growth of strains with defects in PtdEtn biosynthesis on nonfermentable carbon sources was due to a reduction of their viability. For this purpose psd1 , psd1 psd2 , cho1 , psd2 mutant strains and wild-type were grown to mid-logarithmic growth phase on lactate (YPLac) with or without Etn supplementation and stained with acridine orange (Molecular Probes). Viability was close to 100% in all cultures, indicating that deficiency of PtdEtn directly or indirectly leads to cessation of growth on nonfermentable carbon sources but not to cell death. Overproduction of Psd1p in Wild-Type Does Not Affect the Mitochondrial Phospholipid Composition Because the amount of PtdEtn in mitochondria of the psd1 mutant seems to be growth limiting, we examined whether 1) this parameter is also growth limiting in wild-type, and 2) whether the amount of mitochondrial PtdEtn can be increased over the wild-type level by overexpression of Psd1p. The haploid wild-type strain YBR1 transformed with the 2 µ plasmid pRB3 carrying PSD1 (Table 1) and the control strain bearing the empty vector grew with almost identical rate on YPLac. Surprisingly, the phospholipid composition of mitochondria and cell homogenate was not affected by overexpression of Psd1p (our unpublished results), even though the specific activity of PtdSer decarboxylase in vitro was 13-fold increased in the cell homogenate (3.7 nmol/min × mg protein) and 17-fold in mitochondria (27.6 nmol/min × mg protein) over wild-type. These observations indicate that mitochondrial production of PtdEtn is most likely limited by the rate of import of PtdSer into mitochondria but not by the activity of Psd1p. PtdEtn Is Required for Glycosylphosphatidylinositol (GPI)-Anchor Synthesis, but not for Secretion of carboxypeptidase Y (CPY) and Invertase PtdEtn is the precursor for the Etn-P bridge that links the GPI-anchor to the carboxyl-terminal amino acid of proteins (Menon and Stevens, 1992 ). Thus, depletion of PtdEtn was expected to affect the formation of GPI-anchored proteins. To test this prediction, we studied maturation of the GPI-anchored Gas1p in strains deleted of one or both PtdSer decarboxylases on synthetic glucose medium supplemented with Etn or Cho. Western blot analysis (Figure 3A) revealed that the 100-kDa Gas1p-precursor of the endoplasmic reticulum accumulated in the psd1 psd2 double mutant. The precursor band appeared strongest, and the amount of the mature 125-kDa Golgi form of Gas1p was reduced concomitantly, when the strain was grown with Cho supplementation, i.e., when the level of PtdEtn was decreased to the minimum of less than 1 mol% of total phospholipids compared with 20 mol% of wild-type. The Gas1p precursor accumulated, although in a less pronounced way, in the Etn-supplemented psd1 psd2 mutant, supporting the view that Etn is the more efficient precursor for maintaining the PtdEtn pool in this strain. Because GPI-anchor-linked and -unlinked Gas1p precursor of the endoplasmic reticulum is not separated by gel electrophoresis, the analysis described above did not discriminate between a defect in GPI-anchor biosynthesis or a defect in sorting or transport of GPI-anchored proteins from the endoplasmic reticulum to the Golgi. 
For this reason, we investigated whether PtdEtn deficiency affects the secretory pathway in general by measuring the kinetics of CPY maturation and the rate of invertase secretion in psd1 psd2 grown on synthetic glucose medium supplemented with Cho. Mature CPY was formed in psd1 psd2 and wild type with similar kinetics (Figure 3B). External and internal invertase activities were also similar in both strains (Figure 4). These results demonstrate that secretion, in general, is not affected by PtdEtn deficiency. Thus, PtdEtn appears to be required for GPI-anchor biosynthesis, although we cannot exclude that the sorting of GPI-anchored proteins into secretory vesicles is specifically affected by the altered lipid environment of compartments involved in this process. The spatial separation of the PtdEtn biosynthetic pathways and their different efficiencies explain why the various biosynthetic routes are not equally well suited to meet the requirements for PtdEtn in yeast on fermentable and nonfermentable carbon sources. A minimum level of PtdEtn, as formed through sphingolipid turnover, is sufficient for growth on glucose provided that PtdCho is synthesized from Cho via the CDP-Cho (Kennedy) pathway. On nonfermentable carbon sources, a higher level of cellular PtdEtn is required, and mitochondrial synthesis of PtdEtn by Psd1p becomes paramount. Under these conditions, psd1 psd2 and cho1 deletion mutant strains are strictly auxotrophic for Etn because the formation of endogenous Etn-P by Dpl1p is not sufficient to synthesize the amount of PtdEtn that is required. Similarly, the Psd2p pathway alone does not provide enough PtdEtn for growth of a psd1 strain on nonfermentable carbon sources, and increased PtdSer biosynthesis (with exogenous Ser) or the Kennedy pathway (with exogenous Etn or Cho) becomes essential. We conclude from these results 1) that PtdEtn production through the Kennedy pathway is more efficient than decarboxylation of PtdSer by Psd2p, and/or 2) that transport of PtdEtn into mitochondria is growth rate-limiting and less efficient from the site of Psd2p action than from the site where PtdEtn is synthesized through the Kennedy pathway. It has to be noted, however, that neither of these enzymatic steps has yet been unambiguously localized. On glucose media, the Kennedy pathway is only essential when cells lack Cho1p or both Psd1p and Psd2p. In a psd1 psd2 background, deletion of CKI1 is lethal, whereas deletion of EKI1 causes only a higher requirement for Etn, especially on nonfermentable carbon sources. A psd1 cki1 double mutant does not efficiently use Cho as a source for PtdCho biosynthesis because Eki1p appears to be specific for Etn and has a rather poor Cho kinase activity. Furthermore, deletion of CKI1 (Kim et al., 1999 ), in contrast to deletion of EKI1, reduces the activity of the Cho and Etn transporter Ctr1p and may thus contribute to the synthetic lethality of a psd1 psd2 cki1 triple mutant. In contrast to psd1 and psd1 eki1 , the psd1 cki1 mutant does not grow on Ser-supplemented media containing nonfermentable carbon sources. In psd1 cki1 cells, the enhancement of PtdSer synthesis is probably insufficient for growth because of the limited capacity of this strain to recycle Cho formed by phospholipase D. These results also support the view that Eki1p has low Cho kinase activity, resulting in a decreased level of PtdCho in psd1 cki1 cells that cannot be compensated by increased formation of other lipids. 
Without taking into account specific effects of EKI1 and CKI1 deletion on transport of Etn and Cho, and a possible regulatory interaction of pathways of PtdEtn biosynthesis, we can define a ranking for the efficiency of the different PtdEtn biosynthetic routes in yeast as follows: Psd1p > Cki1p > Psd2p > Eki1p > Dpl1p. The second part of this study was focused on possible roles of PtdEtn in yeast, especially in mitochondria. Growth analysis of the various mutant strains revealed a requirement of PtdEtn for mitochondrial function. Consistent with such a function of PtdEtn, we found that the growth defects of psd1 , psd1 psd2 , and cho1 strains on lactate medium correlated with the PtdEtn content in the mitochondria of these strains. Because Etn supplementation and thus stimulation of the Kennedy pathway did not have a pronounced effect on the PtdEtn content of mitochondria from psd1 , psd1 psd2 , and cho1 mutant strains, we conclude that in addition to its limited formation, PtdEtn is also insufficiently supplied to mitochondria, and thus becomes growth limiting, when mitochondrial synthesis of PtdEtn is absent. This observation is in line with previous results from our laboratory obtained by pulse-chase labeling, which showed that PtdEtn produced by Psd2p or via the Kennedy pathway can be imported into mitochondria, although only at a low rate (Bürgermeister et al., 2000 ). However, extramitochondrial PtdEtn formed in strains lacking Psd1p does not accumulate in the extramitochondrial space even under Etn supplementation, because it is more efficiently methylated to PtdCho than imported into mitochondria (our unpublished results). Phospholipid biosynthesis is coordinately regulated in response to inositol availability. Some key enzymes of lipid biosynthesis are repressed by exogenous inositol and derepressed when inositol becomes limiting (reviewed by Henry and Patton-Vogt, 1998 ). Deletion of CHO1 or PSD1 results in a derepression of phospholipid biosynthetic genes and in overproduction and excretion of inositol (Opi-phenotype). Etn supplementation of cho1 or psd1 relieves this Opi-phenotype and restores regulation in response to inositol (Griac, 1997 ). This regulatory circuit appears to account for the increased level of PtdIns in psd1 psd2 and cho1 strains through derepression of IN01, encoding for inositol-1-phosphate synthase, and may also be responsible for the decreased formation of CL in psd1 and psd1 psd2 mutants on YPLac (Table 5). Like Ino1p, but unlike CL synthase, PtdGro-P synthase has been shown to be regulated in response to inositol (Greenberg et al., 1988 ; Shen and Dowhan, 1998 ). This regulation explains the increased in vitro activity of PtdGro-P synthase, but the unchanged activity of CL synthase, in mitochondria of psd1 and psd1 psd2 strains (see RESULTS). The discrepancy between in vitro activities of enzymes involved in CL formation and the decreased content of CL may be explained as a result of direct competition of PtdIns synthase, PtdSer synthase, and PtdGro-P synthase for the common substrate CDP-DAG or by noncompetitive inhibition of PtdSer synthase by inositol (Kelley et al., 1988 ). This hypothesis is in line with the observation that lack of PtdSer biosynthesis in cho1 cells results in the formation of wild-type levels of CL, while at the same time PtdIns biosynthesis is induced. Furthermore, the PtdSer content of a psd1 psd2 strain grown in the presence of Etn supplementation is significantly higher than that in cells grown without Etn supplementation, i. 
e., when CDP-DAG is preferentially used for PtdIns biosynthesis (Table 4). Thus, there appears to be no direct link between the growth phenotype on lactate of mutants with defects in PtdEtn biosynthesis and the CL content of mitochondria (Figure 2; Table 4), but rather an indirect link through regulatory phenomena. It is noteworthy, however, that the sum of CL and PtdEtn might be important for maintaining yeast mitochondrial function, as has been shown previously for E. coli membranes (Rietveld et al., 1993). This hypothesis is supported by the observation that a yeast strain bearing a mutation of the CL synthase gene CRD1 exhibits an elevated level of PtdEtn (Tuller et al., 1998), and the fact that deletions of CHO1 and PGS1 (PtdGro-P synthase) are synthetically lethal (Janitor et al., 1996). No obvious defects of mitochondrial morphology, membrane potential, or respiratory enzymes were detected as a consequence of a low PtdEtn content. The growth defects of psd1, psd1 psd2, and cho1 strains on nonfermentable carbon sources are at least partly due to the spontaneous formation of respiration-deficient cells. We do not know at present whether the mitochondrial genome is completely lost (ρ0) or only partly deleted (ρ−) in these strains. We can also only speculate as to whether this defect is due to a reduced efficiency of anchoring the nucleoid to the inner mitochondrial membrane or to more direct effects on the replication/partitioning apparatus itself. The viability of PtdEtn biosynthesis mutants is not reduced on nonfermentable carbon sources, indicating that the severe growth defect of psd1 psd2 and cho1 strains is due to a dramatically decreased growth rate, but not to cell death. These results support data reported by Griac et al. (1996), who observed a similar effect with a cho1 mutant grown in the absence of Etn or Cho. The cessation of growth may result from an indirect effect of PtdEtn depletion, e.g., reduced ATP synthesis, or may be the consequence of a more direct effect, such as a membrane status-dependent control mechanism for growth. Studies using an ept1 cpt1 double deletion mutant (Menon and Stevens, 1992) revealed that PtdEtn, but not Etn, Etn-P, or CDP-Etn, is the precursor for GPI-anchor biosynthesis. Our data support this result in that depletion of PtdEtn leads to a maturation defect of Gas1p. GPI-anchor biosynthesis is essential in yeast (Orlean et al., 1994), which may be one reason for the requirement of a minimum level of PtdEtn. Most remarkably, however, the general secretory pathway is not affected by PtdEtn deficiency (Figures 3B and 4). This finding is surprising because the unique biophysical property of PtdEtn to form nonbilayer structures has been thought to affect vesicle-mediated protein trafficking (de Kruijff, 1997). It seems unlikely that the residual amount of less than 1 mol% PtdEtn of total phospholipids in the psd1 psd2 strain can fulfill this biophysical requirement. Instead, it appears that in yeast PtdEtn is largely dispensable as a structural component of membranes, and that compensatory effects that are presently unknown may maintain the biophysical properties of the membranes required for cell viability. We thank G. Zellnig from the Institut für Pflanzenphysiologie, Karl-Franzens Universität Graz, Austria, for electron microscopic analysis of the psd1 mutant strain. The kind supply of the MSS204 strain and the p24-3 plasmid by R.C. Dickson (University of Kentucky, Lexington, KY), and of the Gas1p- and CPY-specific antibodies by H.
Riezman (Biocenter Basel, Switzerland) is appreciated. This work was financially supported by the Fonds zur Förderung der Forschung Österreich (projects 12076 and 14468 to G.D., and project 13767 to R.S.). * Corresponding author. E-mail address: guenther.daum{at}tugraz.at. Abbreviations used: CDP-Cho, cytidyldiphosphate choline; CDP-DAG, cytidyldiphosphate diacylglycerol; CDP-Etn, cytidyldiphosphate ethanolamine; Cho, choline; Cho-P, cholinephosphate; CL, cardiolipin; CPY, carboxypeptidase Y; CTP, cytidyltriphosphate; Etn, ethanolamine; Etn-P, ethanolaminephosphate; GFP, green fluorescent protein; GPI, glycosylphosphatidylinositol; PtdCho, phosphatidylcholine; PtdEtn, phosphatidylethanolamine; PtdGro-P, phosphatidylglycerophosphate; PtdIns, phosphatidylinositol; PtdSer, phosphatidylserine; Ser, serine.
http://www.molbiolcell.org/cgi/content/full/12/4/997
crawl-002
en
refinedweb
Re: Microsoft and Trust Take 2
- From: "mayayana" <mayaXXyana1a@xxxxxxxxxxxxxxxx>
- Date: Mon, 7 Apr 2008 10:09:59 -0400

In VB it was expected that people would pretty much stick to VB-digestible COM objects and stay away from the API. In .Net it seems that breaking COM was a way to discourage involvement with the Windows system altogether and direct people toward sandboxed applets.

I don't think that's true, because they've opened up support to a much wider range of options. You can now call a much lower level of COM object. In VB it wasn't possible to call DirectShow natively and it was difficult with a typelib. In dot net it can call all of its interfaces directly. You need to use the interop namespace, but that's just the name they chose; they could have called it ComSupport or something and then it wouldn't be considered interop.

They could have named it anything, but it still seems to be an extra layer that was put there deliberately to "deprecate" COM in .Net, just as they "deprecated" API access in both VB and .Net. (Not that the horse didn't get out of the barn in both cases, but it wasn't through MS's encouragement.)

The gist of that seems to be a description of Microsoft's long time threat/dream - to block actual 3rd-party programming altogether, sending everything through a filter layer. (As though there weren't enough layers already.)

It's not really that big a deal. They're still using the same old model of having functions for you to call, they're just doing it through a mechanism. To have everything available through a dot net interface would be pretty cool.

I understood the article to say that there would be an entirely new API, which *might* be addressable directly via .Net code. That means MS has an opportunity to enforce the "safe" code idea, because only they - and close partners - would have access to the underlying API. I don't think there's any question that that's where they want to get to. 3rd-party programmers have become awkward hangers-on at the web services and entertainment-rental party.

When you think about it, though, the article is confusing. It describes .Net as both legacy *and* possibly current in the new scenario. Either your .Net code is writing to the wrapper on top of the new API, or it's writing to the legacy wrapper that's a wrapper on top of the new API. I don't see how it can be both... It's enough to make one dizzy. :)

Who said anything about running Xtra Problems? :)

and which is already partway to the services model (with MS being the only true admin on a Vista system). Then the next thing is probably a closed services box. Looking at it that way, the last thing I'm worried about is whether VB will run on future Windows. It can do almost anything on 95-XP with no dependencies.

That makes a lot of sense to me. After what you said (with regards to the next Windows) I'd be more nervous than ever using VB6. You might be happy to run XP for ever but what about your users?

It will probably come to that, but for now I prefer my "'Ol Betsy" - the streamlined, spyware-free Win98, which is also free of insecure, unnecessary network services that can't be shut off. Actually, though, XP hasn't really been so bad in terms of writing software for others to use. Almost everyone is running as Admin.
And even though MS has tried to set themselves up as an online shepherd with the new "security" functions in IE post-SP2, (so that people get dire security warnings and/or malfunctionality when trying to download software installers) in my experience the vast majority of people don't seem to notice Microsoft's attempts very much. When most people see a security warning they don't even read it. They just see another example of how difficult and complex computers are, and their only focus is on how to get past this latest confusing obstacle. But what I meant in what I was saying was that I don't consider what's coming - or rather what seems to be coming - to be usable at all. MS is branching into advertising and web services. They don't have much room left to focus on OSs per se. And they've developed conflicting motives. They can't make good OSs and good advertising venues at the same time. I already consider Vista unusable and XP barely salvageable, in terms of having an OS that I can use and control as I wish. I don't know where things are going in the future. Maybe some bright sky will appear from behind the gathering clouds of pending corporate usurpation of the public Internet. (A good example of those clouds was posted at the Washington Post this week: 052.html ) But the way I see it, .Net doesn't make any sense for basic Windows software, and we wouldn't be talking about this here at all if MS were not using .Net as part of their strategy to recast the product as services and advertising. To put that another way: Not only does MS want to switch from the auto business to the taxi business. They also want to force-drag their entire customer base along. I like to write software for my "car", and I find it satisfying to write software that might be useful to other people who drive cars. But those people who are going to accept being pushed into a taxi are on their own. I'm not going to write trinkets like MyTaxiRidePlayListOrganizer for people who don't know any better than to be passive cows in Microsoft's herd of "consumers". (The word Bill Gates consistently likes to use in referring to human beings. I find the word "consumer" to be actually quite evocative of a cow. They're both viewed as commodities that one can exploit for profit based on how much they "eat".) That's one of two big issues that seem to be very difficult to clarify in these .Net debates. The one issue is that .Net is a step toward programmers losing access to the plaform - where Windows actually ceases to be a platform at all, but rather becomes an appliance with an SDK. The second big issue is the confusion that MS so effectively created around the purpose of .Net (which is actually related to the first issue). That's the confusion that so many seemingly intelligent people could even begin to take seriously Microsoft's notion of writing Windows software with their bloated, heavily dependent, Java clone. (I don't even have a JVM installed. I certainly don't need a .Net framework installed.) But I suspect the real issue is not that people like you have been duped, but that you're in one of those areas (like intranet applet writing or server-side components) where .Net *can* make sense. For instance, Tom Shelton happened to make a comment recently about how he has control over the machines where his software runs. Maybe ..Net is handy in that scenario. Certainly the gigantic runtime dependency wouldn't be an issue. 
But for those of us writing software that's intended for use on any Windows PC, there's nothing to talk about. .Net doesn't make sense in a handful of ways, any more than Java does. (In fact, even less so, because the idea of being cross-platform is more than just a marketing scam with Java.) So as far as I can see, the only reason for people like you to keep debating this topic is because you want to help ensure that VB.Net doesn't get dropped. But whether you agree with my assessment of things or not, the fact still stands that I, and I think many of us here, view your arguments as no more relevant than a sales pitch from a Java supporter.
http://www.tech-archive.net/Archive/VB/microsoft.public.vb.general.discussion/2008-04/msg00692.html
crawl-002
en
refinedweb
Lets Build Web Components! Part 1: The Standards Benny Powers 🇮🇱🇨🇦 Updated on ・10 min read Let's Build Web Components! (8 Part Series) Component-based UI is all the rage these days. In fact it's so established that people have even started retconning old-school jQuery widgets as "jQuery Components" ;) When we say "Component", we're mostly referring to self-contained, reusable bits of UI which, once written, we can insert into our apps wherever we want. Fancy interactive buttons, specially designed pull-quotes, or the perennial favourite card widgets are examples of the types of designs that lend themselves well to components. Did you know that the web has its own native component module that doesn't require the use of any libraries? True story! You can write, publish, and reuse single-file components that will work in any* good browser and in any framework (if that's your bag). Read on to find out how! Overview Web Components is an umbrella term that refers to a set of four browser standards that work together to form the web's native component model. <template>elements let you quickly reuse portions of DOM - Custom Elements connect JS classes to custom HTML tags - Shadow DOM hides your shame from the rest of the page - JavaScript Modules to package and publish components Each of these standards provides one piece of the puzzle. In this introductory post, we're going to briefly introduce each of them and explain how they help us in practical web development. <template> Elements The fundamental idea of components is reusable UI. To create that, we need a way to define a template for our component. If you're familiar with React, then you've probably used JSX before. If you're more an Angular type, you've likely defined templates in JavaScript template literals. The <template> element lets us define snippets of HTML which aren't added to the document until cloned by JavaScript. The browser only needs to parse that HTML once (e.g. when the document loads), and can then clone it cheaply whenever asked to. Here's a (really contrived) example of the template element in action: <template id="dialog-template"> <dialog> <p></p> <button>⚓️ All Ashore!</button> </dialog> </template> <label> Type a <abbr title="message"> 💌</abbr> <input id="input"/> </label> <button id="clone-it"><abbr title="Go!">🦑 Ahoy!</abbr></button> <script> document.getElementById('clone-it').onclick = () => superAlert(input.value); function superAlert(message) { // get a reference to the template const template = document.getElementById('dialog-template'); // clone or "stamp" the template's contents const clone = template.content.cloneNode(true); // Make any changes to the stamped content const diag = clone.firstElementChild; // <dialog> element polyfill dialogPolyfill.registerDialog(diag); diag.firstElementChild.textContent = message; diag.lastElementChild.onclick = function closeModal() { diag.close(); diag.remove(); } document.body.appendChild(diag) diag.showModal(); } </script> Using <template> elements is easy and performant. I put together a silly little benchmark that builds a simple table three ways: by cloning a template element, by directly using DOM APIs, and by setting innerHTML. Cloning template elements is the fastest, DOM APIs are a little slower, and innerHTML is slowest by far. So the <template> element lets us parse HTML once and reuse it as many times as we want. Exactly like what we need for our reusable components! Read more about the <template> element and it's DOM API at MDN. 
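For readers who want to see what that comparison looks like in code, here is a rough sketch of the three approaches (this is not the author's actual benchmark; it assumes a <tbody> element and a <template id="row-template"> containing a single <tr><td></td></tr> row already exist in the page):

const tbody = document.querySelector('tbody');

// 1. Clone a <template> that the browser parsed once up front
function addRowFromTemplate(text) {
  const template = document.getElementById('row-template');
  const clone = template.content.cloneNode(true);
  clone.querySelector('td').textContent = text;
  tbody.appendChild(clone);
}

// 2. Build the same row with direct DOM APIs
function addRowWithDom(text) {
  const tr = document.createElement('tr');
  const td = document.createElement('td');
  td.textContent = text;
  tr.appendChild(td);
  tbody.appendChild(tr);
}

// 3. Re-parse markup on every call with innerHTML
function addRowWithInnerHtml(text) {
  tbody.innerHTML += '<tr><td>' + text + '</td></tr>';
}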
Custom Elements The second standard we're going to take a look at is called custom elements. It does exactly what it says on the box: it lets you define your own custom HTML tags. Now you don't have to settle for just plain old <div> and <span>, but you can mark up your pages with <super-div> and <wicked-span> as well. Custom Elements work just like built-in elements; add them your document, give them child elements, use regular DOM APIs on them, etc. You can use custom elements everywhere you use regular elements, including in popular web frameworks All custom element tag names must contain a dash, to differentiate them from built in elements. This also helps to avoid name conflicts when you want to use <bobs-input> and <sallys-input> in the same app. As well, Custom elements can have their own custom attributes, DOM properties, methods and behaviours. An example of how you might use a custom element: <section> <p>Twinkle, twinkle, little <super-spanstar</super-span>.</p> <awesome-button exuberant>Shine it!</awesome-button> </section> Custom elements are defined as JavaScript classes, and registered on the window.customElements object via its define method, which has two parameters: a string to define the element's name, and a JavaScript class to define its behaviour. This example takes a boring old <span> and gives it emoji super-powers! Give it a try. customElements.define('super-span', class SuperSpan extends HTMLElement { /** * `connectedCallback` is a custom-element lifecycle callback * which fires whenever the element is added to the document */ connectedCallback() { this.addEventListener('click', this.beAwesome.bind(this)) this.style.display = 'inline-block'; this.setAttribute('aria-label', this.innerText); switch (this.innerText) { case 'star': this.innerText = '⭐️'; } } /** * You can define your own methods on your elements. * @param {Event} event * @return {Animation} */ beAwesome(event) { let keyframes = []; let options = {duration: 300, iterations: 5, easing: 'ease-in-out'} switch (this.getAttribute('animation')) { case 'shine': keyframes = [ {opacity: 1.0, blur: '0px', transform: 'rotate(0deg)'}, {opacity: 0.7, blur: '2px', transform: 'rotate(360deg)'}, {opacity: 1.0, blur: '0px', transform: 'rotate(0deg)'}, ]; } return this.animate(keyframes, options) } }); Custom Elements have built-in features like lifecycle callbacks and observed attributes. We'll cover those in a later post. Spoiler alert: You can read all about custom elements on MDN Shadow DOM What stalks the document tree, hiding in the shadows, the dark places where innocent nodes fear to tread? Dada dada dada dada! Shadow DOM! I am darkness. I am the night. I am Shadow DOM! Although "Shadow DOM" might sound exotic, it turns out you've been using it for years. Every time you've used a <video> element with controls, or an <input> element with a datalist, or others like the date picker element, you've been using Shadow DOM. Shadow DOM is simply an HTML document fragment that is visible to the user while at the same time isolated from the rest of the document. Similarly to how iframes separate one document from another embedded document, shadow roots separate a portion of a document from the main document. For example, the controls in a video element are actually a separate DOM tree which lives, batman-like, in the shadows of your page. Global styles don't affect the video controls, and the same is true vice-versa. Why is isolating DOM a good thing? 
When working on web apps of any non-trivial size, CSS rules and selectors can quickly get out of hand. You might write the perfect CSS for a single section of your page, only to have your styles overruled by your teammate further down the cascade. Even worse, your new additions to the app might break existing content without anyone noticing! Many solutions to this problem have been developed over time, from strict naming conventions to 'CSS-in-JS', but none of them are particularly satisfying. With shadow DOM, we have a comprehensive solution built in to the browser. Shadow DOM isolates DOM nodes, letting you style your components freely, without worrying that other portions of the app might clobber them. Instead of reaching for arcane class names or stuffing everything into the style attribute, you can style your components in a simple, straightforward way: <template id="component-template"> <style> :host { display: block; } /* These styles apply only to button Elements * within the shadow root of this component */ button { background: rebeccapurple; color: inherit; font-size: inherit; padding: 10px; border-radius: 4px; /* CSS Custom Properties can pierce the shadow boundary, * allowing users to style specific parts of components */ border: 1px solid var(--component-border-color, ivory); width: 100%; } </style> <!-- This ID is local to the shadow-root. --> <!-- No need to worry that another #button exists. --> <button id="button">I'm an awesome button!</button> </template> <style> /* These styles affect the entire document, but not any shadow-roots inside of it */ button { background: cornflowerblue; color: white; padding: 10px; border: none; margin-top: 20px; } /* Custom Elements can be styled just like normal elements. * These styles will be applied to the element's :host */ button, awesome-button { width: 280px; font-size: inherit; } </style> <awesome-button></awesome-button> <button id="button">I'm an OK button!</button> <section id="display"> <abbr title="click">🖱</abbr> a <abbr title="button">🔲</abbr> </section> Shadow DOM is the secret sauce in web components. It's what makes them self-contained. It's what gives us the confidence to drop them into a page without worrying about breaking other parts of the app. And starting with Firefox 63, it's available natively on all good browsers. Read more about Shadow DOM on MDN With these three standards: Template, Custom Elements, and Shadow DOM, we have everything we need to write rich component UIs that run directly in the browser without needing any special tooling or build steps. The fourth standard, JavaScript Modules, enables us to factor complex apps composed of custom elements and publish our components for others to use. JavaScript Modules When we use the word module, what we mean is a freestanding piece of software which contains its own scope. In other words, if I define a variable foo in some module, I can only use that variable inside that module. If I want to access foo in some other module, I'll need to explicitly export it first. Developers have been finding ways to write modular JavaScript for some time now, but it's only been fairly recently (since 2015 in the specs, and for the last year or so in practice) that JavaScript has had its own module system. import { foo } from './foo.js' const bar = 'bar' export const baz = foo(bar) There's a lot to say about modules, but for our purposes, it's enough that we can use them to write and publish web components. Here's a simple example to whet your appetite. 
// super-span.js const options = {duration: 300, iterations: 5, easing: 'ease-in-out'} const keyframes = [ {opacity: 1.0, blur: '0px', transform: 'rotate(0deg)'}, {opacity: 0.7, blur: '2px', transform: 'rotate(360deg)'}, {opacity: 1.0, blur: '0px', transform: 'rotate(0deg)'}, ] const template = document.createElement('template') template.innerHTML = ` <style> span { display: inline-block; font-weight: var(--super-font-weight, bolder); } </style> <span><slot></slot></span> <abbr title="click or mouse over">🖱</abbr> `; customElements.define('super-span', class SuperSpan extends HTMLElement { $(selector) { return this.shadowRoot && this.shadowRoot.querySelector(selector) } constructor() { super() this.shine = this.shine.bind(this) const root = this.attachShadow({mode: 'open'}) root.appendChild(template.content.cloneNode(true)) this.addEventListener('click', this.shine) this.addEventListener('mouseover', this.shine) } connectedCallback() { const slot = this.$('slot') const [node] = slot.assignedNodes() this.setAttribute('aria-label', node.textContent) node.textContent = '⭐️' } shine(event) { this.$('span').animate(keyframes, options) } }); And then in our app's HTML: <script type="module" src="./super-span.js"></script> <super-span>star</super-span> And this, my friends, is the coin-drop moment when you realize how awesome web components can be. Now you can easily import pre-made custom elements with awesome behaviour and semantics right into your documents, without any build step. <!DOCTYPE html> <html lang="en" dir="ltr"> <head> <meta charset="utf-8"> <title>Be Excellent to Each Other</title> <script type="module" src="//unpkg.com/@power-elements/lazy-image/lazy-image.js?module"></script> <script type="module" src="//unpkg.com/@granite-elements/granite-alert/granite-alert.js?module"></script> <script type="module" src="//unpkg.com/@material/mwc-button/mwc-button.js?module"></script> <link rel="stylesheet" href="style.css"> </head> <body> <header> <h1>Cross-platform, Framework-Agnostic, Reusable Components</h1> </header> <main> <granite-alert id="alert" level="warning" hide> <lazy-image role="presentation" src="//placekitten.com/1080/720" placeholder="//web-components-resources.appspot.com/static/logo.svg" fade ></lazy-image> </granite-alert> <mwc-button id="button" raised>🚀 Launch</mwc-button> <script> const alert = document.getElementById('alert') const button = document.getElementById('button') const message = document.getElementById('message') button.onclick = () => { alert.hide = !alert.hide; button.textContent = alert.hide ? '🚀 Launch' : '☠️ Close' } </script> </main> </body> </html> Conclusion Web components standards let us factor self-contained, reusable UI that runs directly in the browser without cumbersome build steps. These components can then be used anywhere you use regular elements: in plain HTML, or within your app's framework-driven templates. In our next post, God-willing, we'll learn how the webcomponentsjs polyfills let us design components and compose apps even for browsers that don't natively support them. 😀 Thanks for reading! 😁 Lets Build Web Components! Part 2: The Polyfills Benny Powers 🇮🇱🇨🇦 ・ 12 min read Would you like a one-on-one mentoring session on any of the topics covered here? Errata - A previous version of this article showed an example of accessing light DOM attributes and children in the constructor. This kind of work should be deferred until connectedCallback. 
- Since this post was originally published, Microsoft has begun development on the web components standards in Edge. Party time! I notice that you too use customElements.define and not document.registerElement. I have been wondering if registerElement is (to be) deprecated but never found a satisfying unopinionated answer. Do you have any insight? Exactly right! document.registerElement is deprecated and shouldn't be used. @bennypowers I would like to show you document.defineElement/nativeEleme..., a polyfill for custom elements that does not require a hyphen (-) in the HTML tag. It has been a while now since I created that repository, but today I added the nativeElements.define() method because you confirmed that registerElement is deprecated in 2018. Obviously I am aware that this is generally a bad idea to be doing, but perhaps you appreciate it nonetheless. 🤣 Awesome! Totally awesome.. man, I think that this series of articles is GOLD. I have been researching web components and web component libraries; I first read the first 3 articles, then started to do research on other sites.. after a time... the research brought me here again. What I mean is that your articles right now are a high-quality resource for web components info, since what people usually know about it is almost nothing, so it is difficult to find something like this. Thanks very much for sharing! Thanks for the kind words 😁 Nice article! Using template.innerHTML is quite hacky and cannot make use of IDE or editor features. Is there any way (e.g. load an html file) to replace the template.innerHTML method in your super-span.js after HTML Imports are deprecated? I think you'd like to check out lit-element. I have a whole article on it in part 5 of this series. One of the best articles on Web Components, both for content and clarity. Really great work @bennypowers ! And looking forward to the 2nd part, of course 🔥 Great post, and a great resource in general. I'll be referring to this a lot when I'm trying to learn more. Really looking forward to the next one :) Thanks, TJ :D This article has restored my faith in the Javascript ecosystem. Thank you Benny. That's what we came here for :) Glad you enjoyed it. Excellent example you bring about web components. Awesome! Thank you, Satya! Please enjoy the other posts in the series. Really great article. Thanks for taking the time to write this :) Here's an example of implementing config-driven UI elements using custom components. medium.com/@paramsingh_66174/gener... Thanks, Benny! This post really opened my eyes on how far the standard has come since the old days of Polymer. Much like your write-up, the syntax is finally clear and concise. One of the best posts I've read. Very well explained. It will be a nice future reference. Can't wait for the next one. Thank you! 😁 Hi Benny, I've never seen the element method .content from const clone = template.content.cloneNode(true); Is this new or just something that's always been available? Hey Eric, thanks for dropping by 🍻 content is a read-only property on HTMLTemplateElement that's not provided on other elements. Its purpose is to give you a handle on the template's contents as a DocumentFragment, so that you can clone it.
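To make that answer concrete, here is a minimal sketch (not from the original thread) showing that content is a DocumentFragment you can clone and stamp:

const template = document.createElement('template');
template.innerHTML = '<p>Hello, template!</p>';

console.log(template.content instanceof DocumentFragment); // true

// Each clone is an independent copy of the parsed-once contents
const copy = template.content.cloneNode(true);
document.body.appendChild(copy);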
https://dev.to/bennypowers/lets-build-web-components-part-1-the-standards-3e85
CC-MAIN-2019-43
en
refinedweb
How can I align a set of three buttons to the right side of a top-docked toolbar, and have another single button aligned to the left? I guess in general my question is: can I specify the alignment of buttons in toolbars? In ExtJS, you can use something like '->' to align buttons in toolbars, but I don't see this as an option in Touch. I see examples of button alignment in the Kitchen Sink, but that is all against Panels using pack and align, not a Toolbar component. Here is my Toolbar component. I would like to have Home, Search, and Find a Store aligned to the right, and Back aligned to the left but hidden. I will call a method on the component to show the Back button when needed. I just need to figure out how to align the buttons. Code:
mynamespace.views.mainToolbar = new Ext.extend(Ext.Toolbar, {
    id: 'mainToolbar',
    ui: 'light',
    initComponent: function() {
        Ext.apply(this, {
            items: [
                { id: 'mainToolbarBack', text: 'Back', handler: this.tapHandler, ui: 'back', hidden: true },
                { text: 'Home', handler: this.tapHandler, pressed: true },
                { text: 'Search', handler: this.tapHandler },
                { text: 'Find A Store', handler: this.tapHandler }
            ]
        });
        mynamespace.views.mainToolbar.superclass.initComponent.apply(this, arguments);
    }
    ......
Any ideas would be great, thanks!
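One approach that is often suggested for Sencha Touch 1.x (an untested sketch, and it assumes the spacer component, { xtype: 'spacer' }, is available in your Touch build) is to let a flexed spacer fill the gap between the left-pinned Back button and the right-aligned group, in place of ExtJS's '->' shorthand:

items: [
    { id: 'mainToolbarBack', text: 'Back', ui: 'back', hidden: true, handler: this.tapHandler },
    // the spacer flexes to take up the free width, pushing everything after it to the right
    { xtype: 'spacer' },
    { text: 'Home', pressed: true, handler: this.tapHandler },
    { text: 'Search', handler: this.tapHandler },
    { text: 'Find A Store', handler: this.tapHandler }
]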
https://www.sencha.com/forum/showthread.php?116098-Toolbar-button-alignment&p=540070
CC-MAIN-2019-43
en
refinedweb
Provided by: libncarg-dev_6.3.0-6build1_amd64 NAME PWRZT - Draws the projections of character strings that are positioned in a plane parallel to one of the three coordinate planes. NOTE: At one time, a Threed routine called PWRZ was supported. PWRZ has been superseded by PWRZT and is therefore considered obsolete. If you call PWRZ, you will get an error message telling you to use PWRZT instead and execution will be terminated. SYNOPSIS CALL PWRZT (U,V,W,CHRS,LCHRS,ISIZE,IDIR,ITOP,ICEN) C-BINDING SYNOPSIS #include <ncarg/ncargC.h> void c_pwrzt (float u, float v, float w, char *chrs, int lchrs, int isize, int idir, int itop, int icen) DESCRIPTION U,V,W (input expressions of type REAL) are the U, V, and W coordinates of a point, in the user's 3-space, relative to which the character string is to be positioned. CHRS (an input constant or variable of type CHARACTER) is the character string to be drawn. It may contain uppercase alphabetic characters, the digits 0 through 9, and a small set of "special" characters (plus, minus, asterisk, slash, left and right parentheses, equals sign, blank, comma, and period). Other characters are treated as blanks. (Note especially that lowercase alphabetic characters are not available.) LCHRS (an input expression of type INTEGER) is the number of characters in CHRS. ISIZE (an input expression of type INTEGER) specifies the character width to be used, defined in terms of the width of the plotter frame. Because projected characters vary in width depending on just where they are placed within the box being viewed and the position from which that box is viewed, ISIZE is interpreted as specifying the width of a character when that character is positioned and viewed in such a way as to make it as large as it could possibly be - when the character is on the near side of the box and in a plane perpendicular to the line of sight. Specifically, If between 0 and 3, ISIZE is 1., 1.5, 2., or 3. times a standard width equal to 1/128th of the screen width. If greater than 3, ISIZE is the character width in units of 1/1024th of the plotter frame. One third of the "width" referred to here is white space. What ISIZE really specifies is the distance between the centers of adjacent characters in a string. Characters are digitized to be 7/6 * ISIZE units high and 4/6 * ISIZE units wide, excluding white space. IDIR (an input expression of type INTEGER) is the direction in which the character string is to be written, as follows: 1 = +U -1 = -U 2 = +V -2 = -V 3 = +W -3 = -W ITOP (an input expression of type INTEGER) is the direction from the center of the first character to the top of the first character; possible values of ITOP are the same as those of IDIR, above. ABS(ITOP) must not be equal to ABS(IDIR). ICEN (an input expression of type INTEGER) is the centering option, specifying where (U,V,W) is relative to the string written, as follows: -1 (U,V,W) is the center of the left edge of the first character. 0 (U,V,W) is the center of the entire string. 1 (U,V,W) is the center of the right edge of the last character. Because characters drawn by PWRZT are stroked using the GKS polyline primitive (so that they can be projected from 3-D to 2-D), they. ACCESS To use PWRZT or c_pwrzt, load the NCAR Graphics libraries ncarg, ncarg_gks, and ncarg_c, preferably in that order. SEE ALSO Online: threed, curve3, fence3, frst3, line3, perim3, point3, psym3, pwrz, set3, threed, tick3, tick43, vect3, ncarg_cbind. 
Copyright (C) 1987-2009 University Corporation for Atmospheric Research The use of this Software is governed by a License Agreement.
http://manpages.ubuntu.com/manpages/xenial/man3/pwrz.3NCARG.html
CC-MAIN-2019-43
en
refinedweb
Provided by: apparmor_2.10.95-0ubuntu2_amd64 NAME aa-exec - confine a program with the specified AppArmor profile SYNOPSIS aa-exec [options] [--] [<command> ...] DESCRIPTION aa-exec is used to launch a program confined by the specified profile and/or namespace. If both a profile and a namespace are specified, the command will be confined by that profile in the new policy namespace. If only a namespace is specified, the profile name of the current confinement will be used. If neither a profile nor a namespace is specified, the command will be run using standard profile attachment (i.e. as if run without the aa-exec command). If arguments are to be passed to the <command> being invoked by aa-exec, then -- should be used to separate the aa-exec arguments from the command. aa-exec -p profile1 -- ls -l OPTIONS aa-exec accepts the following arguments: -p PROFILE, --profile=PROFILE confine <command> with PROFILE. If the PROFILE is not specified, use the current profile name (likely unconfined). -n NAMESPACE, --namespace=NAMESPACE use profiles in NAMESPACE. This will result in confinement transitioning to using the new profile namespace. -i, --immediate transition to PROFILE before executing <command>. This subjects the running of <command> to the exec transition rules of the current profile. -v, --verbose show commands being performed -d, --debug show commands and error codes -- Signals the end of options and disables further option processing. Any arguments after the -- are treated as arguments of the command. This is useful when passing arguments to the <command> being invoked by aa-exec. BUGS If you find any bugs, please report them at <>. SEE ALSO aa-stack(8), aa-namespace(8), apparmor(7), apparmor.d(5), aa_change_profile(3), aa_change_onexec(3) and <>.
http://manpages.ubuntu.com/manpages/xenial/man8/aa-exec.8.html
CC-MAIN-2019-43
en
refinedweb
- what is the problem with it? Basically, this function converts from 'success-or-exception' to 'success-or-null'. You say the caller has to check for null. That's true. In the other case, it would have to handle the exception. One of the two has to be done. Admin NevemTeve: Not really. With a null check it must be explicit at the point of return from getProject() or else the context will be lost when a NullReferenceException eventually occurs at the point of first use. The point of the whole try/catch model is to make "fail fast" easy and unobtrusive; at the point of exception you do things like logging or maybe exception wrapping, then you let it bubble out to the top and crash the whole app. Or in the case of a server-ish app, abort & reset that particular task / connection / transaction / thread. But up near the top of that object / call stack, not down near at the point of failure. In the frist model you're stuck littering your code with an if (somehowDetermineAllErrorStates) { } handler after almost every method call or property reference. In the second model you're not. Seems a fairly clear choice to me. Admin The only possible exception (unless they have nuclear station launch code in conductor) is an out of memory. I'm wondering how much memory logging the exception consumes? It'd be real sad if it's more than creating an object. Addendum 2019-03-07 07:16: Constructor* Admin Depends on how much code that constructor executes. It could easily do all sorts of complicated stuff, connect to databases, etc. And might throw an exception if any of those things fails. I agree that throwing an exception out of a parameterless public (AKA default) constructor is very bad form. But for enterprisy hairy objects with names like "Project" not all that rare. Part of the fun of TDWTF is guessing about the rest of the code we don't get to see. Admin Why is that the only possible exception? The constructor could have anything in it. Like a call to some kind of persistence layer or to append something to a logfile. These kinds of situations can throw "any" kind of exception. Admin The problem is that it's either useless or making things worse. If you don't do anything about the exception, if will simply bubble up the call stack to whoever cares and it's done. If you return null, then the caller is forced to do something similar therefore it will simulate the exception behavior with more effort. If the caller will not do anything then it will fall back to previous behavior by NPE. It's clear, that at the end the result is the same and the extra handling code is not needed at all. Admin Yes, one or the other must be done. The beauty of this implementation is you get to do both. We could continue this pattern all the way up the stack: If (project == null) throw new ProjectMissingException() ... catch (ProjectMissingException) { project = null } ... if (project == null) throw new HigherLevelProjectMissingException() Admin I agree. 
The pattern I like to follow is: Boolean Function1 (out Result, ref Stack) { try { Result = somethingelse(); return (Result != null); } catch { Result = null; Add(Stack,Exception); Log(Exception); return (Result != null); } } Boolean Function2 (Stack) { try { Result = null; if (Function1(out Result, Stack) == false || Result == null) { throw new Exception("Call to Function1() failed."); } return true; } catch { Add(Stack,Exception); Log(Exception); return false; } } Once you get into a pattern of checking for A) false returns and B) null objects coming back, it's really easy to write a very reliable and easy to debug system. If you name your stuff right and the bottom layer has semi-useful exception messages, sometimes the users can figure out what went wrong without your help (very helpful when it's more of a usage error than a bug). But some developers don't want to use exceptions consistently, or log useful details, or separate their code into logical pieces, or write decent error messages, so everything looks like "too much work." TRWTF is this snipped being submitted and accepted as WTF. Admin The biggest thing that offends me here is that the original exception is lost. Something went wrong, good luck figuring out what! I'd say the exact opposite of what Tina's co-worker understood: Never catch exceptions that you don't understand. The fact that an exception occurred that you didn't understand means that you have a bug and you have no idea what the implications are. Best to gather diagnostic information and get back to a known state ASAP. Admin To all the complete and utter wastes of oxygen which are the majority of the correspondents on this site ... Come on, at the very least log the exception message. Admin The problem is that they aren't actually handling the exception. Just logging and kicking the can. The general rule of thumb in java is to ask yourself "can I actually handle this exception in the given context?" If the answer is no you throw the exception. You don't catch log and return null. Admin Yes, it's just an impedance mismatch between two layers of code (perhaps the lower layer is a third party library with no source available). Translate and log. Nothing wrong with that. I've had a number of occasions when I've written a "catch" which does nothing but write a log message - you need to know what the logging, monitoring and alerting systems are doing with the log entry to have a full understanding of this code. Admin A try catch is not always the proper way to handle an error, in this case it might be assumed that the caller will have some kind of error handling that wouldn't be appropriate in a catch block. Without knowing the context of how this is being called the only real issue here is not logging more details to the log. Admin While it certainly stinks of code rot, I do not think it is an [completely] illegitimate means of handling. It primarily just convert the means of handling and assumes that regardless of type of failure continuation is impossible. If I had to critique one thing, it would be that the exception itself is not logged. The only reason I can think of doing so is that there is exactly one reason why initialization would have failed. It is, however, for this reason why I usually try to avoid any operation that could result in an exception in a constructor if it makes sense in the design. 
In this case, I would feel more comfortable if Project was an interface (IProject for the dot-net folks) and the constructed class was "new ProjectImplemented();" Admin No. The real issue is catching the exception at all. As Dank mentioned (and it applies to .Net as well), don't catch an exception unless you can either handle it gracefully or add some valuable information. Deal with the logging somewhere else. i.e. a Global Exception handler. Or use a nice library like log4net. Admin In java you have checked exceptions. Anything that isn't a checked exception is a runtime exception. Runtime exceptions are usually caused by a logical error vs checked exceptions which are considered unavoidable(i.e. connection timeout). Runtime exceptions should be fixed at their source rather than being caught. The compiler and your IDE force you to address checked exceptions by either catching them or throwing. In short: They didn't because the compiler didn't force them to. Admin Finally, someone who understands the benefit of exceptions over return codes. Admin Aside from the ridiculous amount of boiler-plate code a policy of catching and logging exceptions inevitably leads to a lot of repeated messages in your log file... all the way up the call stack. Admin There is probably some interesting stuff further up the call stack where it catches any null reference exception and retries, but lets every other exception die. Admin Presumably this is from an enviroment that can give you the call stack, right? Because I've seen something like that done in environments that couldn't give you the call stack: you got log messages at the point of error and on up, but the actual error was handled by code at the appropriate level (often, the user interface level) Admin Cut out the middle man: throw (Exception) null; ;) Admin Don't know about this one. It actually seems to be sort of internal method for potentially getting Project object while not really caring about possible problems with instantiation. Might be WTF or the OP might just have gotten confused by the availability of getProject() method which was not intended for the purpose he was trying to use it for. Not really possible to tell without context which is not provided here. Admin The point of exceptions is they make the application fail such that you read a simple stack trace and see a chain of events leading up to it and, very often, fix it just based on an auto-generated stack trace. It's not a good model in that when you need certain code to be robust, it's a trial and error process to accomplish this, but it can be done. If you've dug through log traces or stepped through code, you know why nulls are so painful: Nulls hide the point at which the error occurred, and discard any information about the error. That null could get passed through other pieces of code, written to a file and days, months, however later you find, "oh, I've got these unexpected nulls in this file, where the hell did that happen?" This is really a hidden complexity issue. If you let things fail and fix them, your code handles actual data. You can express your inputs and outputs in plain English, and your function is called with one possible set of N arguments. When you return nulls, now every piece of data means "a thing" or "this is missing for some unknown reason" and now N arguments becomes 2^N possible states, most of which just indicate brokenness. 
So you have to add more defensive code, more complexity that does nothing useful, to put off fixing something you're probably going to need to fix anyway. Admin Sometimes you don't care WHY you didn't get back the [Project]. If it's there you can return it, if it isn't there or there was an error with getting it you return a null. If there is a problem with getting the [Project] there the calling code probably can't do anything about it. An obvious example of this is UI code. Maybe there displaying lots of different things. You still want to display the page even if there is no [Project]. To put it simply, I don't think that this is a WTF.
https://thedailywtf.com/articles/comments/offensively-defensive-offense
CC-MAIN-2019-43
en
refinedweb
KDECore #include <kservicetypefactory.h> Detailed Description A sycoca factory for service types It loads the service types from parsing directories (e.g. servicetypes/) but can also create service types from data streams or single config files - See also - KServiceType Exported for kbuildsycoca, but not installed. Definition at line 43 of file kservicetypefactory.h. Constructor & Destructor Documentation Create factory. Definition at line 30 of file kservicetypefactory.cpp. Definition at line 56 of file kservicetypefactory.cpp. Member Function Documentation - Returns - all servicetypes Slow and memory consuming, avoid using Definition at line 95 of file kservicetypefactory.cpp. Not meant to be called at this level. Implements KSycocaFactory. Definition at line 57 of file kservicetypefactory.h. Read an entry from the database. Implements KSycocaFactory. Definition at line 111 of file kservicetypefactory.cpp. Find a the property type of a named property. Definition at line 85 of file kservicetypefactory.cpp. Find a service type in the database file (allocates it) Overloaded by KBuildServiceTypeFactory to return a memory one. Definition at line 68 of file kservicetypefactory.cpp. - Returns - the unique servicetype factory, creating it if necessary Definition at line 63 of file kservicetypefactory.cpp. Virtual hook, used to add new "virtual" functions while maintaining binary compatibility. Unused in this class. Reimplemented from KSycocaFactory. Definition at line 136 of file kservicetypefactory.cpp. Member Data Documentation Definition at line 86 of file kservicetypefactory.
https://api.kde.org/4.x-api/kdelibs-apidocs/kdecore/html/classKServiceTypeFactory.html
CC-MAIN-2019-43
en
refinedweb
About this project: … watch the polarity, because it will break the LEDs. Step 4: About the software! Code (Arduino):
#include "FastLED.h"

// How many leds in your strip?
#define NUM_LEDS 68

byte pixelType = 0;
byte drawIn[4];
byte frameIn[NUM_LEDS * 3];

#define DATA_PIN 3
//#define CLOCK_PIN 13

// The bluetooth module pins
#define RX_PIN 0
#define TX_PIN 1

// Define the array of leds
CRGB leds[NUM_LEDS];

void setup() {
  // Uncomment/edit one of the following lines for your leds arrangement.
  // (The original addLeds call was lost in extraction; WS2812B/GRB is an assumption,
  // so substitute your strip's chipset and colour order.)
  FastLED.addLeds<WS2812B, DATA_PIN, GRB>(leds, NUM_LEDS);
  Serial.begin(9600);
  pinMode(TX_PIN, OUTPUT);
  pinMode(RX_PIN, INPUT);
}

void loop() {
}

// Called between loop() iterations whenever serial data has arrived
void serialEvent() {
  pixelType = Serial.read();
  switch (pixelType) {
    case 0: // draw mode: set a single pixel
      while (!Serial.available()) {}
      Serial.readBytes(drawIn, 4);
      leds[drawIn[0]] = CRGB(drawIn[1], drawIn[2], drawIn[3]);
      FastLED.show();
      Serial.flush();
      break;
    case 1: // clear mode: turn every pixel off
      for (int i = 0; i < NUM_LEDS; i++) {
        leds[i] = CRGB::Black;
      }
      FastLED.show();
      Serial.flush();
      break;
    case 2: // frame in mode: receive a full RGB frame for all LEDs
      while (!Serial.available()) {}
      Serial.readBytes(frameIn, (NUM_LEDS * 3));
      for (int i = 0; i < NUM_LEDS; i++) {
        leds[i] = CRGB(frameIn[i * 3], frameIn[(i * 3) + 1], frameIn[(i * 3) + 2]);
      }
      FastLED.show();
      Serial.flush();
      break;
    case 3: { // brightness mode
      while (!Serial.available()) {}
      int brightnessLED = Serial.read();
      FastLED.setBrightness(brightnessLED);
      Serial.flush();
      break;
    }
  }
}
Software to control the shades (Java): no preview (download only).
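For context, the sketch above reads one command byte and then a payload over the Bluetooth serial link. A host-side illustration of the byte frames each case expects (this is not part of the original project; it only builds the frames in plain JavaScript, so you can send them with whichever serial library you prefer):

// Byte frames understood by the Arduino sketch's serialEvent() switch (illustration only)
const NUM_LEDS = 68;

// case 0: draw one pixel -> [0, index, r, g, b]
function drawPixelFrame(index, r, g, b) {
  return Uint8Array.from([0, index, r, g, b]);
}

// case 1: clear all pixels -> [1]
const clearFrame = Uint8Array.from([1]);

// case 2: full frame -> [2] followed by NUM_LEDS * 3 colour bytes (r, g, b per LED)
function fullFrame(colors) { // colors: array of [r, g, b] triples, length NUM_LEDS
  if (colors.length !== NUM_LEDS) throw new Error('expected one colour per LED');
  return Uint8Array.from([2, ...colors.flat()]);
}

// case 3: global brightness -> [3, value] with value 0-255
function brightnessFrame(value) {
  return Uint8Array.from([3, value]);
}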
https://create.arduino.cc/projecthub/RGBFreak/diy-rgb-led-shades-controlled-by-arduino-c23e57
CC-MAIN-2019-43
en
refinedweb
Getting started with managed code application development
Applies To: Dynamics CRM 2013

This guide shows you how to write application code that connects to the Microsoft Dynamics CRM web services and invokes web methods to perform common business data operations like create, delete, update, and find. You will also be shown how to set up a project in Microsoft Visual Studio with the required namespaces, and how to handle exceptions returned from the web services. After completing this guide, you will know enough to explore the many other business operations that are supported by the web services.

In This Section
Start a managed code project in Visual Studio
https://docs.microsoft.com/en-us/previous-versions/dynamicscrm-2013/developers-guide/dn531016%28v%3Dcrm.6%29
CC-MAIN-2019-43
en
refinedweb
- Start Date: 2018-10-31
- Relevant Team(s): Ember Data
- RFC PR:
- Tracking: Ember Data Packages

Summary
This document presents the proposed public import path changes for ember-data, and moving ember-data into the @ember-data namespace.

Motivation

Reduce Confusion & Bike Shedding
Users of ember-data have often noted their confusion at the existence of both direct and "god object" (DS.) style imports for modules from ember-data. The documentation currently uses primarily the DS. style, and users have expressed interest and confusion over why the documentation has not been updated to reflect direct imports.

Improve The TypeScript Experience
The presence of multiple import locations confuses TypeScript's autocomplete, symbol resolution, and type hinting.

Simplify The Mental Model
Users of ember-data complain about the large API surface area; however, a large portion of this surface area is non-essential user-land APIs that the provided adapter and serializer implementations expose. This move to packages helps us simplify the mental model in three ways. First: it gives us a natural way of dividing the documentation and learning story such that key concepts and APIs are more discoverable. Second: it allows us specifically to isolate the API surface area explosion of the provided adapter and serializer implementations and make it clear that these are non-essential, replaceable APIs. E.g., it will help us to communicate that these adapters and serializers are an implementation, not the required implementation. Third: it clarifies the roles of several concepts within ember-data that are often misused today. Specifically: the embedded-records-mixin should only be used with the RESTAdapter, and transforms are only a serialization/deserialization concern and not a way of defining custom attrs or types. Furthermore, transforms are only applicable to the serializer implementations that ember-data provides, and not to custom (and sometimes not to subclassed) serializers.

Improve the Contributor Experience
Contributors to ember-data are faced with a large, complex project with poor code and test organization. This makes it unduly difficult to discover what tests exist, where to add tests, where associated code lives, and even what parts of the code base relate to the feature or bug that they are looking to address. This move to packages will help us restructure the project and associated tests in a manner that is more discoverable.

Provide a Clear Subdivision of Packages
Today, ember-data is a large single package (~35KB gzipped in production). ember-data is often one of the largest dependencies emberjs users have in their applications. However, not all users utilize all parts of ember-data, and some users use very little. Providing these packages helps to clearly show the cost of various features, and better allows us to enable end users to eliminate unneeded packages. Users that implement their own adapter or serializers today must still carry the significant weight of the adapter and serializer implementations that ember-data ships regardless. This is a weight we should enable these users to eliminate. With the landing of RecordData and the merging of the modelFactoryFor RFC, it is likely that many applications will soon require far less of ember-data than they do today. ember-m3 is an example of a project that utilizes these APIs in a way that requires significantly less of the ember-data experience.
Provide Infrastructure for Additional Changes
ember-data is entering a period of extended evolution, of which RecordData and modelFactoryFor are only the early pieces. For example, current thinking includes the possibility of ember-data evolving to provide an ember-m3-like experience for json-api as the default out-of-the-box experience, and a rethinking of how we manage the request/response lifecycle when fulfilling a request for data. These experiences would live alongside the existing experience for a time prior to any deprecations of the current layer, and it is possible that in some cases the current experience would never be deprecated. Subdividing ember-data into these packages will enable us to provide a more seamless transition between these experiences without imposing any package-size costs on users that do not use either the current or the new experience.

Detailed design
This RFC proposes import paths following the guidelines established in Ember Modules RFC #176, with two addendums to account for scenarios that weren't faced by ember:
- Error sub-classes are named exports
- Mixins are named exports
This is done to allow for continued grouping by common usage and mental model, where otherwise users would be faced with multiple imports from lengthy file paths. The following modules would continue to live in a monorepo that (until a further RFC) would remain at github.com/ember/data.

Notes

@ember-data/model
InternalModel and RootState are tightly coupled to the store and to our provided Model implementation. Over time we need to uncouple this, but given their coupling to Model and our desire to enable them to be eliminated from projects not using Model, these concepts belong in @ember-data/model, although they will not be given direct import paths. The following belong in @ember-data/model and not in @ember-data/relationship-layer with relationships. While this presents a mild risk of confusion due to the presence of the relationship-layer package, the argument for their presence here is that they are a ui-layer concern coupled to the current Model presentation layer and not related to the overall state management of relationships, which could itself be used with alternative implementations.
- belongsTo
- hasMany
The following have the same considerations as #2 but they will not be given direct import paths.
- PromiseManyArray
- ManyArray

@ember-data/serializers
We should move automatic registration of transforms into a more traditional app/ directory re-export for the package so that when the package is dropped they cleanly drop as well.

@ember-data/relationship-layer
This package seems thin but it's likely to hold quite a bit. Additional private things that would be moved here:
- everything in -private/system/relationships/state
- BelongsToReference and HasManyReference
- relationship logic from store / internal-model that needs to be isolated and extracted

@ember-data/debug
Moving DebugAdapter here would allow dropping it if not desired. Additionally we should likely RFC dropping it for production builds, where it adds persistent unnecessary overhead for a tool meant for devs. This exists to support the ember inspector.

Documented Public APIs without public import paths
There are a few public classes that are not exposed at all via export today.
Those classes will not be given public export paths, but the package containing their documentation and implementation is shown here:
- @ember-data/store: Reference, RecordReference, StoreWrapper
- @ember-data/relationship-layer: BelongsToReference, HasManyReference
- @ember-data/model: PromiseBelongsTo, PromiseRecord

Migration
Blueprints, guides, docs, and twiddle would be updated to use the new @ember-data/ package imports. A codemod would be provided to convert from the existing import locations to the new ones, as well as lint rules for encouraging their use. The package ember-data would continue to exist, much like ember-source. Initially, this package would provide all of the subpackages as dependencies as well as the respective re-exports for supporting the existing import paths. After a time, the existing paths would be deprecated. Users who have resolved the deprecations may choose to convert to consuming only the packages they still require directly, by dropping ember-data from their package.json and adding in the individual @ember-data/ packages as necessary. Ultimately, the default ember-data story in ember-cli would change to install select packages from @ember-data directly.

How we teach this
This RFC should be seen as a continuation of the javascript-modules RFC that defined explicit import paths for emberjs. Codemods and lint rules would be provided to convert existing imports to the new syntax. Existing import locations would continue to exist for a time but would at some point in the future be made to print build-time deprecations. End users would need to run the codemod at some point, but no other changes will be required. Ember documentation and guides would be updated to reflect these new import paths as well as to utilize the new package divisions to improve the teaching story.

Drawbacks
- A tiny amount of churn
- Sub-packages will require sprinkling significant numbers of excess package.json files throughout our repo.
- Our import paths may not align with the expected mental model for addon import paths going forward (no /src/ in path)

Alternatives
- Divide into packages without exposing the new division publicly
argument for: Don't expose churn to end users without a clear win; we aren't 100% sure what belongs in a vague "future ember-data", so wait until we are sure.
rebuttal: The churn is minimal and mostly automated (codemod). There are clear wins here for many users. We should not hold up progress now on an uncertain future. Dividing into packages now gives us more options for how to manage future evolution. Regardless of when we become certain of what belongs in "future ember-data", these packages would need to exist alongside it at least for a time.
- Don't divide into packages until nebulous future RFCs have landed
argument for: This argument is an extension of alternative 1 in which we wait for specific concepts to mature and materialize that we have discussed internally, including a significant rework of how we manage the request/response lifecycle. These new feature RFCs would come with corresponding deprecation RFCs for parts of the system they either fully replace or make vestigial.
rebuttal: The argument here is a variation of the argument in alternative 1 and the rebuttal merely extends that rebuttal as well. These future deprecations would necessarily be long-tail, if we deprecate at all. There is the option to have both old and new experiences live side-by-side.
Additionally, if we deprecate and then land the @ember-data packages, there is both an equal amount of churn and fewer options for how to manage those deprecations.
- Use the @ember namespace.
argument for: ember-data is an official package and we wish to position it centrally within the ember ecosystem. This argument has been presented by other core teams in response to previous attempts to move forward with a packages RFC for ember-data.
rebuttal: ember-cli and glimmer are also official packages, but with their own namespaces. Additionally, re-using the @ember namespace would only further the confusion that many folks already have regarding:
- where ember ends and ember-data begins
- whether ember-data is required or optional
- whether other data layers are seen as "bad practices" (they are not)
- what packages are provided by ember-data vs ember
ember-data's status as a team, in the guides and in release blog posts on emberjs.com, as well as its presence in the default blueprint provided by ember-cli, make clear its status as an official offering. Using the @ember namespace is not required for this. This argument also necessarily foments an untrue presupposition: that ember-data is the right choice for every app. While we strive to make this the case, it would be very difficult to claim this today, and may never be true, as every app presents unique concerns and needs. Finally, using the @ember namespace would leave us in the unfortunate position of either always scoping all of our packages to @ember/data/ or of fighting with emberjs for package names.
- This RFC but with Adapters and Serializers broken out into the packages @ember-data/json @ember-data/rest @ember-data/json-api.
argument for: grouping the adapter / serializer "by API spec" feels more natural and would allow users to drop only the versions of adapters / serializers they don't require.
rebuttal: Even without considering future changes to ember-data's API surface, there are several issues with this approach. The implementations inherit each other:
JSONAPISerializer extends RESTSerializer extends JSONSerializer extends Serializer
JSONAPIAdapter extends RESTAdapter extends Adapter
The adapter / serializer pairings aren't coupled
- It is fairly common to use the JSONAPIAdapter with the RESTSerializer, or with a custom serializer that extends the RESTSerializer, and vice versa.
- Even when using a consistent spec (json-api or rest) it is common to need a fully custom serializer. The division of needs is at least equally between adapter/serializer as it is between specs.
Transforms are an implementation detail for all the provided serializers
- But they are not required and likely not even used by custom serializers.
Packages for automatically registered fallbacks would fit poorly.
- Serializers: "-default" "-rest" "-json-api"
- Adapters: "-rest" "-json-api"
Today, we use multiple serializers for a single type based on entry-point: Model.serialize (per-type) / Model.toJSON ("-json") / Adapter.serialize (per-adapter).
That said, this organization is also one of the only nods to future RFCs this RFC concedes. The existing provided implementations all follow roughly the same interface for their implementations, and that interface is something we strongly wish to change. For this reason, it seems advantageous to keep the existing implementations together such that the delineation between a new experience and this experience can be kept clear.
https://emberjs.github.io/rfcs/0395-ember-data-packages.html
CC-MAIN-2019-43
en
refinedweb
DYModalNavigationController is a simple UINavigationController subclass written in Swift 5.0.

Use cases:
- present a small size view controller with rounded edges over the current context modally (e.g. if the content is rather small and the standard modal presentation would show a lot of empty space). Set a fixed size so that the navigation controller's size is not adjusted when the screen orientation changes.
- present a modal view controller over the current context with top, bottom, left, right margins with a fade in transition. The presenting view controller behind it is still visible at the margins (unless the margins are set to 0). The size adjusts automatically when the screen orientation changes.

Example project
To check out the example project, simply clone the repo or download the zip file.

Features
- Create a DYModalNavigationController with a fixed size in case your view controller instance should not change its size when changing the screen orientation.
- Set a background blur or dim effect
- Customise the corner radius of the DYModalNavigationController view.
- Set a slide in/out animation (customisable animation movement directions) or a fade in/out animation
- Customise the drop shadow
- Customise the animation transition duration

Installation
Installation through Cocoapods or Carthage is recommended.

Cocoapods:
target '[project name]' do
  pod 'DYModalNavigationController', '~> 1.0'
end

Carthage:
Simply add the following line to your Cartfile:
github "DominikButz/DYModalNavigationController" ~> 1.0
Afterwards, run "carthage update DYModalNavigationController --platform iOS" in the root directory of your project. Follow the steps described in the carthage project on github (click on the carthage compatible shield above).

Make sure to import DYModalNavigationController into your View Controller subclass:
import DYModalNavigationController

Usage

Code example: Fixed size DYModalNavigationController with background blur

let size = CGSize(width: 300, height: 200)
var settings = DYModalNavigationControllerSettings()
settings.slideInDirection = .right
settings.slideOutDirection = .right
settings.backgroundEffect = .blur
self.navController = DYModalNavigationController(rootViewController: contentVC(), fixedSize: size, settings: settings)

Code example: DYModalNavigationController with margins and fade effect

var settings = DYModalNavigationControllerSettings()
settings.animationType = .fadeInOut // animationType .slideInOut is the default setting!
self.navController = DYModalNavigationController(rootViewController: contentVC(), fixedSize: nil, settings: settings)
// with fixedSize nil, the size will be set according to the top, bottom, left, right margins in the settings.

Code example: DYModalNavigationController with custom animations

var settings = DYModalNavigationControllerSettings()
settings.animationType = .custom
self.navController = DYModalNavigationController(rootViewController: contentVC(), fixedSize: size, settings: settings, customPresentationAnimation: { (transitionContext) in
    self.foldOut(transitionContext: transitionContext, navController: self.navController)
}, customDismissalAnimation: { (transitionContext) in
    self.foldIn(transitionContext: transitionContext, navController: self.navController)
})

Change log
Version 1.0: initial version.
Version 1.1: added customPresentationAnimation and customDismissalAnimation to the initializer.

Author

License
DYModalNavigationController is available under the MIT license. See the LICENSE file for more info.
Latest podspec

{
    "name": "DYModalNavigationController",
    "version": "1.1",
    "summary": "UINavigationController subclass with support for custom size and present and dismiss animations.",
    "swift_versions": "5.0",
    "description": "UINavigationController subclass that supports setting a custom size and slide in/out and fade in/out animations.\nPresented over the \"current context\".",
    "homepage": "",
    "license": {
        "type": "MIT",
        "file": "LICENSE"
    },
    "authors": {
        "dominikbutz": "[email protected]"
    },
    "source": {
        "git": "",
        "tag": "1.1"
    },
    "platforms": {
        "ios": "11.0"
    },
    "source_files": "DYModalNavigationController/**/*",
    "exclude_files": "DYModalNavigationController/**/*.plist",
    "public_header_files": "DYModalNavigationController/**/*.h"
}

Wed, 08 May 2019 10:38:05 +0000
https://tryexcept.com/articles/cocoapod/dymodalnavigationcontroller
CC-MAIN-2019-43
en
refinedweb
Frank Wiles wrote:
> Hi!

Probably ModPerl:: is the best fit, IMHO :) Though you may have more popularity with /Apache/ in case people aren't aware of the existence of the ModPerl:: namespace.

--
_____________________________________________________________
Stas Bekman mailto:[email protected]
MailChannels: Assured Messaging(TM)
The "Practical mod_perl" book

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
http://mail-archives.apache.org/mod_mbox/perl-dev/200601.mbox/%[email protected]%3E
CC-MAIN-2019-43
en
refinedweb
Introduction to ADO.NET Databases
Microsoft SQL Server and Microsoft Visual Studio 2010

Introduction
Microsoft SQL Server is mainly used to create and maintain computer databases. It does not provide the means of creating graphical elements that would make it easy for a regular user to take advantage of its values. To create an application made of graphical user interface (GUI) objects, you must use a separate environment. To make this easy, Microsoft created a very direct link between Microsoft Visual Studio and Microsoft SQL Server. The communication is so smooth that, from Microsoft Visual Studio, you can use Microsoft SQL Server directly without having to formally open it. If you create an application using the New Project dialog box, the System.Xml.dll library is directly included in your application.

Getting Access to ADO.NET Libraries
The classes used to create ADO.NET databases are defined in the System.Data namespace and are stored in the System.Data.dll library. If you create a Windows Application from the New Project dialog box, Microsoft Visual Studio adds the System.Data namespace to your file(s).

Connection to a Microsoft SQL Server
Working from the Server Explorer, you don't have to first explicitly log on to a Microsoft SQL Server. You can establish a connection from Microsoft Visual Studio. To do this, first display the Server Explorer (on the main menu, you can click View -> Server Explorer). To create a new connection, you can right-click the Data Connections node and click Add Connection...

Introduction to the SQL Connection

A Class for a SQL Connection
To support a connection to Microsoft SQL Server, the .NET Framework provides the SqlConnection class:

public sealed class SqlConnection : DbConnection, ICloneable

The SqlConnection class is defined in the System.Data.SqlClient namespace. Before using this class, you can first include this namespace in your file:

using System;
using System.Data;
using System.Windows.Forms;
using System.Data.SqlClient;

public class Exercise : Form
{
    public Exercise()
    {
        InitializeComponent();
    }

    private void InitializeComponent()
    {
        Text = "Introduction to ADO.NET";
        StartPosition = FormStartPosition.CenterScreen;
    }

    static int Main()
    {
        System.Windows.Forms.Application.Run(new Exercise());
        return 0;
    }
}

To connect to a database, you can first declare a variable of type SqlConnection using one of its two constructors. The default constructor allows you to declare the variable without specifying how the connection would be carried. The second constructor takes as argument a string value. Its syntax is:

public SqlConnection(string connectionString);

You can create the necessary (but appropriate) string in this constructor when declaring the variable. This would be done as follows:

public class Exercise : Form
{
    public Exercise()
    {
        InitializeComponent();
    }

    void InitializeComponent()
    {
        SqlConnection connection = new SqlConnection("Something");
    }
}

If you want, you can first create the string that would be used to handle the connection, then pass that string to this constructor. This would be done as follows:

void InitializeComponent()
{
    string strConnection = "Something";
    SqlConnection connection = new SqlConnection(strConnection);
}

To support the connection as an object, the SqlConnection class is equipped with a property called ConnectionString that is a string. If you use the default constructor to prepare the connection, you can first define a string value, then assign it to this property.
This would be done as follows:

void InitializeComponent()
{
    string strConnection = "Something";
    SqlConnection connection = new SqlConnection();
    connection.ConnectionString = strConnection;
}

The Attributes of a Connection String
To use a SqlConnection object, you must provide a connection string, which is a group of Key=Value attributes separated by semicolons. This string is not case-sensitive (even though you are working on a C# application). This whole group is either passed as a string to the second constructor:

void InitializeComponent()
{
    SqlConnection connection = new SqlConnection("Key1=Value1;Key2=Value2;Key_n=Value_n");
}

or assigned as a string to the SqlConnection.ConnectionString property:

void InitializeComponent()
{
    string strConnection = "Key1=Value1;Key2=Value2;Key_n=Value_n";
    SqlConnection connection = new SqlConnection();
    connection.ConnectionString = strConnection;
}

How you create these attributes depends on how you are establishing the connection. The first piece of information to provide is the computer on which Microsoft SQL Server is installed. You can specify this from the Connect to Server dialog box, where you would select the machine from the Server Name combo box. If you are working from the Add Connection dialog box, to see the list of servers and select one, you can click the arrow of the Server Name combo box. If you are programmatically connecting to a computer using the SqlConnection class, the connection string includes an attribute named Server, or Data Source, or Address, or Addr, or Network Address. For the rest of our lessons, this attribute will be referred to as the computer attribute. If you are creating your application on the same computer on which SQL Server is installed, the computer attribute can be identified as (local). Here is an example:

void InitializeComponent()
{
    SqlConnection connection = new SqlConnection("Server=(local); ");
}

If you are working from the Add Connection dialog box, you can type (local) in the Server Name combo box and press Enter. If you know the name of the computer, you can assign it to the computer attribute. Here is an example:

void InitializeComponent()
{
    SqlConnection connection = new SqlConnection("Server=central; ");
}

In the same way, if you are connecting to a specific computer, you must provide its name. Here is an example:

SqlConnection connection = new SqlConnection("Data Source=central; ")

As an option, you can include the name of the computer in single-quotes. If you are working from the Add Connection dialog box, you can type the name of the server without clicking the arrow of the combo box and press Enter. Remember that the computer attribute is a requirement regardless of the (type of) application, even if it is local.

The Authentication
If you are working from the Add Connection dialog box, you can click the Use Windows Authentication radio button. If you are programmatically establishing the connection, the connection string of the SqlConnection class includes an attribute called Trusted_Connection or Integrated Security that can have a value of true, false, yes, no, or SSPI, with SSPI having the same indication as true. If you are establishing a trusted or simple connection that doesn't need to be verified, you can assign a value of true or SSPI. Here is an example:

void InitializeComponent()
{
    SqlConnection connection = new SqlConnection("Server=(local);Trusted_Connection=SSPI");
}

When you use the true or SSPI values, the user name (if any) and the password (if any) of the person opening your application would be applied.
For example, if the application is being opened on Microsoft Windows XP Professional or Windows 7 that has a default user name and password, the application would be opened fine without checking security. If you are using the Connect to Server dialog box and you want to specify the username and the password, in the Authentication combo box, select SQL Server Authentication. If you are programmatically establishing the connection, to apply authentication, you can assign false or no to the security attribute you selected. Here is an example:

void InitializeComponent()
{
    SqlConnection connection = new SqlConnection("Server=(local);Integrated Security=no");
}

The Microsoft SQL Server Login
To log in using SQL Server Authentication, or if the Trusted_Connection or the Integrated Security option is set to false or no, you must have a special account that would allow you to access the server. This account is called a login. To visually create a login, in the Object Explorer of Microsoft SQL Server, expand the Security node. Right-click Login and click New Login... This would display the Login - New dialog box. In the Login Name text box, type the user name of the person whose account you are creating (remember that the account must have been created already on the computer (Microsoft Windows XP-7) or on the server). If you provide a user name that cannot be found in the system, when trying to finalize, you would receive an error. After entering the user name, you must specify how the user would be authenticated. You have a choice between the operating system and Microsoft SQL Server. After creating a login, a person can use it to connect to the server. If you are using the Connect To Server dialog box, after selecting SQL Server Authentication, the user can type that login name in the Login combo box. When Microsoft SQL Server is installed, it creates a default account named sa. You can also use it as a login and provide its password. If you are programmatically creating the connection, to specify the login name, after assigning false or no to the security attribute, you must use the User ID attribute and assign it a valid username. Here is an example:

void InitializeComponent()
{
    string strConnection = "Server=(local);" +
                           "Integrated Security=no;" +
                           "User ID=wmessmann";
    SqlConnection connection = new SqlConnection(strConnection);
}

The Password
If you are manually connecting to the server using the Connect to Server dialog box, after selecting SQL Server Authentication, besides the username, you must provide a password to complete the authentication. If you are using the Add Connection dialog box, after selecting the Use SQL Server Authentication radio button and typing a username, you can also enter a password in the indicated text box. If you are programmatically establishing the connection, besides the username, to specify the password, you can use either the PASSWORD or the PWD attribute (remember that the attributes are not case-sensitive, but the value of the password is) and assign it the exact password associated with the User ID attribute of the same connection string. Here is an example:

void InitializeComponent()
{
    string strConnection = "Server=(local);" +
                           "Integrated Security=no;" +
                           "User ID=wmessmann;PWD=$outh~@kotA";
    SqlConnection connection = new SqlConnection(strConnection);
}

In some circumstances, you can use an empty password, in which case you would assign an empty string to the password attribute.

The Database
Microsoft SQL Server ships with a few default databases.
In Microsoft SQL Server Management Studio, the available databases and those you will create are listed in a node called Databases. To display the list of databases, you can expand the name of the server and expand the Databases node. If you are not trying to connect to a database, you don't need to locate and click any. If you are attempting to connect to a specific database, in Microsoft SQL Server Management Studio, you can simply click the desired database. If you are working from the Add Connection dialog box, you can select the desired database from it. If you are programmatically establishing the connection, you can use the Database attribute; in the following example its value is left empty:

void InitializeComponent()
{
    SqlConnection connection = new SqlConnection("Server=(local);Database=;");
}

Another alternative is to use the Initial Catalog attribute and assign it a single-quoted string. Here is an example:

void InitializeComponent()
{
    string strConnection = "Server=(local);Initial Catalog='exercise1';";
    SqlConnection connection = new SqlConnection(strConnection);
}

As mentioned above, the Database attribute is optional, especially if you are only connecting to the computer and not to a specific database.

Additional Attributes

Opening and Closing a Connection

Using an Existing Connection
When working in Microsoft Visual Studio, if you create a connection using the Server Explorer, a link to the actual database would display as a sub-node of the Data Connection link. You can re-use one of these connections when necessary. As an alternative, on the main menu, you can click Data -> Add New Data Source... In the Data Source Configuration Wizard, click Database and click Next. In the second page of the wizard, select an existing connection in the combo box.

Opening a Connection
If you are manually connecting to a server or a database using the Microsoft SQL Server Management Studio, the steps we have described so far allow you to open the connection. If you are creating the connection using the Add Connection dialog box, after selecting the options, you can click OK. After programmatically creating a connection string, to apply it and actually establish the connection, you must call the SqlConnection.Open() method. Its syntax is:

public override void Open();

Here is an example of calling it:

void InitializeComponent()
{
    SqlConnection connection = new SqlConnection("Server=(local);Initial Catalog='exercise1';");
    connection.Open();
}

As you can see, this method does not take any argument. The SqlConnection object that calls it is responsible for getting the connection string ready.
Disposing of a Connection
The SqlConnection class is derived from a class named DbConnection:

public abstract class DbConnection : Component, IDbConnection, IDisposable

The DbConnection class implements the IDisposable interface. This means that, to close the connection and free its resources, you can use the using keyword. This would be done as follows:

void InitializeComponent()
{
    using (SqlConnection connection = new SqlConnection("Data Source=(local);Integrated Security=yes"))
    {
        connection.Open();
    }
}

When this code executes, it opens the connection. Inside of the curly brackets, you can do whatever you want. When the compiler reaches the closing curly bracket, it calls the SqlConnection.Close() method, which means you do not need to remember to close it.

Commanding a Database
We have learned different ways of connecting to a server. After establishing a connection, if you are successful, the database system becomes available to you and you can take actions, such as creating a database and/or manipulating data. An action you perform on the database server or on a database is called a command. To support the various commands you can perform on a Microsoft SQL Server database, the System.Data.SqlClient namespace provides the SqlCommand class. To use it, you can first declare a SqlCommand variable using its default constructor, then create a string that would carry the action to perform. Once the string is ready, you can assign it to the CommandText property. This would be done as follows:

void InitializeComponent()
{
    SqlCommand CommandToExecute = new SqlCommand();
    string strCommandToExecute = "Blah Blah Blah";
    CommandToExecute.CommandText = strCommandToExecute;
}

After preparing the command, you must specify the connection that will carry it. To do this, you can assign a SqlConnection object to the Connection property of the SqlCommand object. This would be done as follows:

void InitializeComponent()
{
    string strConnection = "Server=(local);" +
                           "Integrated Security=no;" +
                           "User ID=sa;PWD=$outh~@kotA";
    SqlConnection connection = new SqlConnection(strConnection);
    SqlCommand CommandToExecute = new SqlCommand();
    string strCommandToExecute = "Blah Blah Blah";

    connection.Open();
    CommandToExecute.Connection = connection;
    CommandToExecute.CommandText = strCommandToExecute;
    connection.Close();
}

Instead of declaring a SqlCommand variable and the command text separately, as an alternative, you can define the command text when declaring the SqlCommand variable. To do this, you can use the second constructor of the SqlCommand class. The syntax of this constructor is:

public SqlCommand(string cmdText);

Once again, after using this constructor, you must specify what connection would carry the action. To do this, you can assign a SqlConnection object to the Connection property of your SqlCommand. Here is an example:

void InitializeComponent()
{
    string strConnection = "Server=(local);" +
                           "Integrated Security=no;" +
                           "User ID=sa;PWD=$outh~@kotA";
    SqlConnection connection = new SqlConnection(strConnection);
    SqlCommand CommandToExecute = new SqlCommand("Blah Blah Blah");

    connection.Open();
    CommandToExecute.Connection = connection;
    connection.Close();
}

You can also pass the SqlConnection object directly to the constructor of the SqlCommand class, as in the following example:

void InitializeComponent()
{
    SqlConnection connection = new SqlConnection("Server=(local);" +
                                                 "Integrated Security=no;" +
                                                 "User ID=sa;PWD=$outh~@kotA");
    SqlCommand CommandToExecute = new SqlCommand("Blah Blah Blah", connection);

    connection.Open();
    CommandToExecute.Connection = connection;
    connection.Close();
}

If you had initiated the action using the default constructor of the SqlCommand class, you can assign a SqlConnection object to the Connection property of the SqlCommand class. In the next sections and future lessons, we will study the types of commands that can be carried.
Command Execution
After establishing a connection and specifying what command needs to be carried, you can execute it. To support this, the SqlCommand class is equipped with the ExecuteNonQuery() method. Its syntax is:

public override int ExecuteNonQuery();

This method does not take any argument. The SqlCommand object that calls it must have prepared a valid command. In future lessons, we will see that there are other ways a SqlCommand object can execute commands.

Well, the Command Timed Out
In some cases, a command may take longer than expected to complete; the SqlCommand class provides a CommandTimeout property that lets you control how long to wait before the command times out.

In this and the next few lessons, all of the commands we perform will be communicated as strings. When we study (stored) procedures, we will see other types of commands. To allow you to specify the type of command you want to perform, the SqlCommand class is equipped with the CommandType property, which is based on the CommandType enumeration. The CommandType enumeration has three members: StoredProcedure, TableDirect, and Text. For a SqlCommand object, the default value is Text.

Reading Data
A data command is used to initiate an action to perform on a database. To read data from a database, one of the objects you can use is called a data reader. To understand how a data reader works, imagine you have a list of values. If you use a data reader to read these values, the compiler visits the first value to read it. After reading it, the compiler moves to the second value. After visiting the second value, the compiler moves to the third value, and so on. One of the particularities of a data reader is that, once it visits a value, reads it, and moves to the next value, the compiler cannot refer to the previous value.

The SQL Data Reader
To support data readers, the .NET Framework provides, for a Microsoft SQL Server database, a class named SqlDataReader. To get a data reader, you can declare a variable of type SqlDataReader. This class does not have a public constructor. This means that, to use it, you must directly specify where it would read its data. To provide data to the reader, the SqlCommand class is equipped with the ExecuteReader() method, which is overloaded with two versions. The simplest version of this method uses the following syntax:

public SqlDataReader ExecuteReader();

Based on this, before using a data reader, you should first create a command that would specify how data would be acquired. Once the data is read, you can pass it to the data reader by assigning the result of a call to the SqlCommand.ExecuteReader() method to a SqlDataReader object.

Using a SQL Data Reader
Once data is supplied to the reader, you can access it, one value at a time, from top to bottom. To access data that the reader acquired, you can call its Read() method, whose syntax is:

public override bool Read();

As mentioned already, the Read() method simply reads a value and moves on. When reading the values, the data reader reads one value at a time and moves to the next.
http://www.functionx.com/vcsharp/adonet/Lesson02.htm
CC-MAIN-2017-30
en
refinedweb
Looking ahead: are all the low-color icons and shapes in the library finally replaced by high-color, modern equivalents? And what about including Visio in an Ultimate SKU? With only 5 announced SKUs, there seems to be no improvement on that one.

Hi sevenflavor, The short answer about icons is that we have improved their look with more color depth. We'll have the details in a future post. We'll also discuss our SKU / packaging later on, but the plan is for Visio to continue to be sold separately from the Office SKUs.

Disappointed to see that the 'page' metaphor is still there. The canvas should be an unlimited space, and only when one goes to print / export does one choose the scale and print area.

Hi Damian, Stay tuned for a discussion of the new AutoSize Page feature in Visio 2010. It effectively turns the page into an infinite canvas.

I would think that by the year 2010, Visio would have entered the 3-D world. I am tired of looking at simplistic 2D figures that fail to convey meaningful information effectively.

"It effectively turns the page into an infinite canvas." That sounds great; it is very annoying having to deal with the page artefact when forming an extensive system schema. It would be nice to be able to design on an infinite page and define different diagram sections to be grouped for printing.

"Sub-processes and containers break up a diagram into understandable pieces." Two thumbs up here!! Can't wait.

Looks great... but it would be nice if Visio could do Finite Automaton diagrams... perhaps someday.

Will there be forward/reverse engineer Database functions? We lost that in 2007 Pro, and I really relied on that feature. Thanks! (bwoody @ microsoft . com)

It has always been a cumbersome job to define classes, methods, properties etc. within the namespaces, simply because it was a very "mouse click intensive" job, and to find an object within the namespace using the dropdown you have to actually keep scrolling through it. I would like to see auto-complete features in the next release...

Hi, I would love to see Visio drawing from some of the SmartArt things in Office 2007. Sometimes it is just a total mismatch if you have to come up with a document and you somehow have to use both diagrams created in Visio and other office applications.

I am on the same line as Bernie Hill: I am an architect and use Visio as my main design/drawing program, but I miss a 3D tool in Visio. The infinite canvas will be right for certain people in the corporate world, but the rest are OK with the page sizes (Arch/Eng. ISO formats). I also miss in Visio Technical a tool that MS subtracted from the original Visio: the "property line", which was very useful for surveying drawings.

So, I guess this blog isn't being monitored? No response to my question on modeling databases anymore? Ah well.

I began using Visio with version 4 and have used it up to the 2007 version. I want to know if this application under MS is going to keep supporting technical architecture and engineering drawings, or if it is best to say bye and change to another drawing application. I liked it because of its low learning curve and because the original developers knew real draftsmen's environments.
https://blogs.msdn.microsoft.com/visio/2009/07/17/visio-2010-technical-preview-released/
CC-MAIN-2017-30
en
refinedweb
Using the following with TF 0.9.0rc0 on 60,000 (train) and 26,000 (test) or so records with 145 coded columns (1,0), trying to predict 1 or 0 for class identification:

classifier_TensorFlow = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=2, steps=100)
classifier_TensorFlow.fit(X_train, y_train.ravel())

WARNING:tensorflow:TensorFlowDNNClassifier class is deprecated. Please consider using DNNClassifier as an alternative.
Out[34]: TensorFlowDNNClassifier(steps=100, batch_size=32)

score = metrics.accuracy_score(y_test, classifier_TensorFlow.predict(X_test))
print('Accuracy: {0:f}'.format(score))
Accuracy: 0.923121

print(metrics.confusion_matrix(y_test, X_pred_class))
[[23996   103]
 [ 1992    15]]

classifier_TensorFlow = learn.DNNClassifier(hidden_units=[10, 20, 10], n_classes=2)

I don't think it is a bug; from the source code of DNNClassifier, I can tell that its usage differs from TensorFlowDNNClassifier's. The constructor of DNNClassifier doesn't have the steps param:

def __init__(self,
             hidden_units,
             feature_columns=None,
             model_dir=None,
             n_classes=2,
             weight_column_name=None,
             optimizer=None,
             activation_fn=nn.relu,
             dropout=None,
             config=None)

As you can see here. Instead, the fit() method that DNNClassifier inherited from BaseEstimator now has the steps param; notice that the same happens with batch_size:

def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
        monitors=None):

For the "it hangs with no completion?" part: in the doc of the fit() method of BaseEstimator it is explained that if steps is None (the default value), the model will train forever. I still don't get why I would want to train a model forever. My guess is that the creators think this way is better for the classifier if we want to have early stopping on validation data, but as I said, that is only my guess. As you can see, DNNClassifier doesn't give any feedback like the deprecated TensorFlowDNNClassifier did; it is supposed that the feedback can be set up with the 'config' param that is present in the constructor of DNNClassifier. So you should pass a RunConfig object as config, and in the params of this object you should set the verbose param. Unfortunately I tried to set it so I could see the progress of the loss, but didn't get so lucky. I recommend you take a look at the latest post of Yuan Tang in his blog here, one of the creators of skflow, aka tf learn.
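Putting the answer together, the migrated call might look like the following minimal sketch. It is based only on the constructor and fit() signatures quoted above; X_train, y_train, X_test and y_test are the asker's own data, and the feature_columns/config arguments are left at their defaults for brevity.

from sklearn import metrics
from tensorflow.contrib import learn

# steps and batch_size now belong to fit(), not to the constructor.
classifier = learn.DNNClassifier(hidden_units=[10, 20, 10], n_classes=2)

# Passing an explicit steps value avoids the "train forever" behaviour of steps=None.
classifier.fit(X_train, y_train.ravel(), steps=100, batch_size=32)

score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))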
https://codedump.io/share/zkhZz3IEH1fo/1/tensorflowdnnclassifier-class-is-deprecated-but-replacement-does-not-seem-to-work
CC-MAIN-2017-30
en
refinedweb
Help:Maintenance template removal

Many Wikipedia pages display maintenance templates addressing problems with the topic or content of the page. You may have arrived at this page after you clicked on a link in just such a maintenance template that said "Learn how and when to remove this template message". These maintenance templates are added and removed by volunteers, and this help page explains the process through which this happens.

Overview
Maintenance templates (or "tags") are never removed automatically. If you fix the issue, the tag will still remain until you or someone else manually removes it. The mechanics of removal are usually as simple as clicking edit at the top of a page or in the section involved, removing the code that produces the display of the template, leaving an edit summary, and saving the page. However, it is not okay to remove maintenance templates until the issue flagged by the template is remedied first. Wikipedia works because of the efforts of volunteers like you and their bold edits to assist us in building this encyclopedia project. Fixing problems and then removing maintenance templates when you're done is important in that effort.

Addressing the flagged problem
We don't know which maintenance tag brought you to this page, and thus what problem needs attention. However, every maintenance template contains links to help pages, policies, guidelines, or other relevant pages that provide information on the problem the template was placed to flag. You may also find guidance on some of the more commonly seen templates in the specific template guidance section below. There are a host of other issues that may be flagged, from tone and style of writing, to structure and formatting, lack of links to other articles or of links to the article at issue, compliance with Wikipedia's manual of style, the lack of a lead section, and others. Fixing the issue is the condition you need to fulfill before removing the template, and that does require some effort on your part: you need to understand both the problem and its solution.

An example
If the issue flagged by the maintenance template is that the article contains no references, the template used might be {{Unreferenced}} – typically placed by the code that you would see when editing: {{Unreferenced|date=July 2017}}. It is important to understand that what you see when reading an article, and what you see when editing it, is different. Thus, this code, only seen when editing, results in the display of the 'called' template when you are reading. This template contains a number of links indicated by words and phrases shown in blue. Three of these links are to pages that, when explored, should provide context and resources for you to understand why the template was placed. Any other maintenance template that brought you here should likewise contain relevant explanatory links addressed to whatever its issue is. Read these explanatory and contextual pages to learn about the problem and what it is you need to do to take care of it. This particular template, being very common, is further addressed in the specific template guidance section below.

When to remove
Most templates are not meant to be in articles permanently.
Any user without a conflict of interest may remove a template in any of the following circumstances:
- When they have addressed the issue the template raises;
- When they reasonably determine that the issue has been resolved (however, if the issue appears contentious, seek consensus on the talk page);
- When there is consensus on the talk page (or elsewhere) as to the flagged issue;
- When it reasonably appears that the template is no longer relevant, such as a {{Current}} template appearing in an article that no longer documents a current event.
- Some neutrality tags such as Conflict of Interest (COI) and Neutral point of view (POV) require that the tagging editor initiate a dialog (generally on the article's talk page) to sustain their placement.[under discussion]

When not to remove
A template should not be removed if any of the following applies:
- When the issue has not yet been resolved;
- When there is ongoing activity or discussion related to the template issue;
- When you do not understand the issues raised by the template;
- When you simply disagree with the template (seek consensus first).
- You have been paid to edit the article or have some other conflict of interest.[under discussion]
{{Multiple issues}}
Some articles will be flagged for multiple discrete problems using a single template: {{Multiple issues}}, which wraps the individual maintenance templates (each typically carrying its own |date=July 2017 parameter). If you take care of one or more of the flagged problems but not all of them, remove only the templates for the issues you have addressed and leave the others in place.

{{Advert}}
{{Advert}}, typically placed by the code {{Advert|date=July 2017}}, and having redirects such as {{Advertisement}}, {{Advertising}}, {{Ad}} and {{Puff}}, flags the issue of an article that reads like an advertisement.

{{POV}}
{{POV}}, typically placed by the code {{POV|date=July 2017}}, flags an article whose compliance with a neutral point of view has been questioned.

Researching the tagged issue
The template may have links to guidance on its face, and more specific information can be found with a Wikipedia search. When viewed in the Edit interface, header maintenance tags are typically placed in the first lines of the article. The first parameter is the name of the template; some templates may have additional parameters such as the month and year the template was placed. Example: {{Unreferenced|date=July 2017}}. Additional guidance on the tagged issue can be found by searching Wikipedia with the Template: prefix followed by the template's name. For example, searching Wikipedia for Template:Unreferenced will take you to the guidance at Template:Unreferenced. The accompanying documentation for all maintenance templates can be located in this way.
https://infogalactic.com/info/Help:Maintenance_template_removal
CC-MAIN-2017-30
en
refinedweb
Introduction to Dialog Boxes

Introducing Dialog Boxes

Dialog Box Creation
To create a dialog box, you start with a form, which you can get by creating a Windows Application or deriving a class from Form. Here is an example:

using System;
using System.Drawing;
using System.Windows.Forms;

public class Exercise : System.Windows.Forms.Form
{
    public Exercise()
    {
        InitializeComponent();
    }

    private void InitializeComponent()
    {
        Text = "Domain Configuration";
        Width = 320;
        Height = 150;
        Location = new Point(140, 100);
        StartPosition = FormStartPosition.CenterScreen;
    }
}

public class Program
{
    static int Main()
    {
        System.Windows.Forms.Application.Run(new Exercise());
        return 0;
    }
}

To give the window the fixed border typical of a dialog box, you can assign FormBorderStyle.FixedDialog to the form's FormBorderStyle property:

private void InitializeComponent()
{
    Text = "Domain Configuration";
    Width = 320;
    Height = 150;
    Location = new Point(140, 100);
    StartPosition = FormStartPosition.CenterScreen;
    FormBorderStyle = FormBorderStyle.FixedDialog;
}

A dialog box usually does not display the Minimize and Maximize buttons either, which you can remove by setting the MinimizeBox and MaximizeBox properties to false:

private void InitializeComponent()
{
    Text = "Domain Configuration";
    Width = 320;
    Height = 150;
    Location = new Point(140, 100);
    StartPosition = FormStartPosition.CenterScreen;
    FormBorderStyle = FormBorderStyle.FixedDialog;
    MinimizeBox = false;
    MaximizeBox = false;
}

Using Various Forms

private void btnNewProperty_Click(object sender, EventArgs e)
{
    PropertyEditor dlgEditor = new PropertyEditor();
    dlgEditor.ShowDialog();
}

An Automatic Dialog Box
Microsoft Visual Studio provides a fast way to create a dialog box.
http://www.functionx.com/vcsharp2010/dlgboxes/introduction.htm
CC-MAIN-2017-30
en
refinedweb
In today’s Programming Praxis exercise, our goal is to write a program that can tell us on which lines each identifier and operator in a program appears. Let’s get started, shall we?

Some imports:

import Data.List
import qualified Data.List.Key as K
import Language.Haskell.Lexer

Rather than muck about with brittle regular expressions or something to that effect, we’ll just use a proper Haskell lexer library. Note that the one we’re using comes from the haskell-lexer package, which shares a module name with the haskell-src package that comes with the Haskell Platform. When running this program, pass -hide-package haskell-src as an argument to GHC.

With that out of the way, all we need to do is read a file, list all the tokens and group all the identifiers by line.

main :: IO ()
main = do file <- readFile "test.hs"
          mapM_ putStrLn . map ((\((n:_), ls) -> unwords $ n : nub ls) .
              unzip) . K.group fst $ K.sort fst
              [(s, show $ line p) | (tok, (p,s)) <- lexerPass0 file,
               elem tok [Varid, Conid, Varsym, Consym]]

Running this algorithm on its own source code produces the following:

$ 7 8 9
. 7 8
Conid 10
Consym 10
IO 5
K 2
Varid 10
Varsym 10
as 2
elem 10
file 6 9
fst 8
lexerPass0 9
line 9
ls 7
main 5 6
map 7
mapM_ 7
n 7
nub 7
p 9
putStrLn 7
qualified 2
readFile 6
s 9
show 9
tok 9 10
unwords 7
unzip 8

Looks like everything is working properly.

Tags: bonsai, code, Haskell, kata, praxis, programming, xref
https://bonsaicode.wordpress.com/2011/04/22/programming-praxis-xref/
CC-MAIN-2017-30
en
refinedweb
An ether being in cyberspace calling itself - Julia Anne Case - uttered: | | Subject: Duplicate Mail... | Date: Fri, 14 Apr 1995 07:44:07 -0400 (EDT) | | I Some of it is due to people replying (or group replying) without touching up the header portion. Julia will note that she will receive this message twice. Once from me. The other remailed by the list. When people do this style of replying more people get added to the To: line and this cycle expands to more people. My mailer allows me to edit the header portion of my email before sending it out. I try to do this (esp when responding to a list) whenever I remember to. This time Julia I do it on purpose to show my point. -Paul -- #include <stddisclamer> __________________________________________________________________________ [ Paul-Joseph de Werk, B.S. \ RX Net, Inc. ] [ Systems Analyst II \ MIS Dept. ] [ \ vrx: [email protected] ] [ \ inet: paul%[email protected] ] [_______________________________\__________________________________________]
http://www.greatcircle.com/lists/majordomo-users/mhonarc/majordomo-users.199504/msg00124.html
CC-MAIN-2017-30
en
refinedweb
I was debugging my project and could not find a bug. Finally I located it. Look at the code. You think everything is OK, and the result will be "OK! OK! OK!", don't you? Now compile it with VC (I've tried VS2005 and VS2008).

#include <math.h>
#include <stdio.h>

int main ()
{
    for ( double x = 90100.0; x<90120.0; x+=1 )
    {
        if ( cos(x) == cos(x) )
            printf ("x==%f OK!\n", x);
        else
            printf ("x==%f FAIL!\n", x);
    }
    getchar();
    return 0;
}

It could be this: I know it's hard to accept, but floating point arithmetic simply does not work like most people expect. Worse, some of the differences are dependent on the details of your particular computer's floating point hardware and/or the optimization settings you use on your particular compiler. You might not like that, but it's the way it is. The only way to "get it" is to set aside your assumptions about how things ought to behave and accept things as they actually do behave... (with emphasis on the word "often"; the behavior depends on your hardware, compiler, etc.): floating point calculations and comparisons are often performed by special hardware that often contains special registers, and those registers often have more bits than a double. That means that intermediate floating point computations often have more bits than sizeof(double), and when a floating point value is written to RAM, it often gets truncated, often losing some bits of precision... just remember this: floating point comparisons are tricky and subtle and fraught with danger. Be careful. The way floating point actually works is different from the way most programmers tend to think it ought to work. If you intend to use floating point, you need to learn how it actually works...
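A common workaround, given the behavior described in the answer, is to compare with a tolerance instead of ==; a small sketch (the epsilon value here is an arbitrary illustrative choice):

#include <math.h>
#include <stdio.h>

/* returns nonzero when a and b differ by no more than eps */
static int nearly_equal(double a, double b, double eps)
{
    return fabs(a - b) <= eps;
}

int main(void)
{
    double x = 90100.0;
    if (nearly_equal(cos(x), cos(x), 1e-9))   /* tolerant comparison instead of == */
        printf("x==%f OK!\n", x);
    return 0;
}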
https://codedump.io/share/4NwrGRtMq7HW/1/visual-c-mathh-bug
CC-MAIN-2017-30
en
refinedweb
When you need to perform calculations with values that have an associated unit of measure, it is very common to make mistakes by mixing different units of measure. It is also common to perform incorrect conversions between the different units that generate wrong results. The latest Python release doesn't allow developers to associate a specific numerical value with a unit of measure. In this article, I look at three Python packages that provide different solutions to this problem and allow you to work with units of measure and perform conversions. Three Different Packages to Add Units of Measure The need to associate quantities with units of measure in any programming language is easy to understand, even in the most basic math and physics problems. One of the simplest calculations is to sum two values that have an associated base unit. For example, say that you have two electrical resistance values. One of the values is measured in ohms and the other in kilo-ohms. To sum the values, you must choose the desired unit and convert one of the values to the chosen unit. If you want the result to be expressed in ohms, you must convert the value in kilo-ohms to ohms, sum the two values expressed in ohms, and provide the result in ohms. The following Python code uses variables with a suffix that defines the specific unit being used in each case. You have probably used or seen similar conventions. The suffixes make the code less error-prone because you easily understand that r1_in_ohms holds a value in ohms, and r2_in_kohms holds a value in kilo-ohms. Thus, there is a line that assigns the result of converting the r2_in_kohms value to ohms to the new r2_in_ohms variable. The last line calculates the sum and holds the result in ohms because both variables hold values in the same unit of measure. r1_in_ohms = 500 r2_in_kohms = 5.2 r2_in_ohms = r2_in_kohms * 1e3 r1_plus_r2_in_ohms = r1_in_ohms + r2_in_ohms Obviously, the code is still error-prone because there won't be any exception or syntax error if a developer adds the following line to sum ohms and kilo-ohms without performing the necessary conversions: r3_in_ohms = r1_in_ohms + r2_in_kohms There is no rule that assures that all the variables included in the sum operation must use the same suffix; that is, the same unit. There aren't invalid operations between variables that hold values with incompatible units. For example, you might sum a voltage value to a resistance value and the code won't produce any error warning. Now, imagine that Python adds support for units of measure. Each numeric value can have an associated unit of measure enclosed within <>. The following three lines would replace the previous code with an easier to understand syntax: r1 = 500 <ohms> r2 = 5.2 <kilo-ohms> r1_plus_r2 = (r1 + r2) <ohms> r1 holds a value of 500 and an associated unit of measure, ohms. r2 holds a value of 5.2 and an associated unit of measure, kilo-ohms. Because each variable includes information about its unit of measure, the sum operation is smart and it can convert compatible units such as ohms and kilo-ohms. The last line sums the two values taking into account their unit of measure, and converts the result to the specified unit, ohms. The r1_plus_r2 variable holds the result of the sum operation expressed in ohms. The following line would produce an exception because the units of measure are incompatible: sum = (10 <volts> + 500 <ohms>) <inches> However, the support should be smart enough to allow you to mix different length units. 
For example, the following line would produce a valid result in inches. sum = (10 <inches> + 1200 <centimeters>) <inches> Python doesn't support units of measure, but the three Python packages I examine here provide different ways to enable them. Each package takes a different approach. While none works as well as a native language feature would, these solutions do provide a baseline that you can improve according to your specific needs. Numericalunits: A Bare-Bones Solution Numericalunits is a single Python module (numericalunits.py) that provides easy unit conversion features for your operations. You just need to follow two simple rules: - To assign a unit of measure to a value, multiply the value by the unit. - To express a value generated by its multiplication by the unit in a different unit, divide the value by that unit. You simply need to add the following lines to import the module with an alias ( nu) and execute the $ reset_units method to start working with the different units. import numericalunits as nu nu.reset_units('SI') When you call nu.reset_units('SI'), Numericalunits uses standard SI units (short for Système Internationale d'Unités in French) for storing the values (see Figure 1). This way, any length value is stored in meters, no matter the length unit you specify in the multiplication. Read here if you want more information about SI base units. Figure 1: The SI base units and their interdependencies. If you call the default nu.reset_units(), Numericalunits uses a random set of units instead of the standard SI units. I really don't like using a random set of units because it usually generates a loss of precision and results that lack accuracy. The only advantage of using random units is that you can check dimensional errors by running calculations twice and comparing whether the results match. You have to call nu.reset_units() before each calculation and compare the two values. I don't like this way of checking dimension errors because it adds a huge overhead and it is indeed error-prone. Thus, I suggest using Numericalunits as a unit conversion helper with the standard SI units initialization. Numericalunits doesn't save information about the unit of measure in the numerical variable; therefore, there is no way to know which unit you used when you assigned the value. If you need more than a unit conversions helper, I suggest working with one of the other packages. You can read the following line of code as "assign 500 ohms to r1." r1 = 500 * nu.ohm You can read the following line of code as "display the value of r1 expressed in kilo-ohms." print(r1 / nu.kohm) The following line displays the value of r1 expressed in ohms. print(r1 / nu.ohm) The following code uses Numericalunits to sum the values of r1 and r2. Notice that the code is self-documented because you can easily see that r1 holds a value in ohms and r2 in kilo-ohms. The r1_plus_r2 variable holds the result of the sum operation expressed in ohms and r1_plus_r2_kohms holds the result converted to kilo-ohms. Notice that you can sum the values of r1 and r2 without having to convert the units to ohms and the result will be accurate because of the way in which Numericalunits saves the values in the base units. import numericalunits as nu nu.reset_units('SI') r1 = 500 * nu.ohm r2 = 5.2 * nu.kohm r1_plus_r2 = r1 + r2 r1_plus_r2_kohms = r1_plus_r2 / nu.kohm The following code uses Numericalunits to sum four distance values expressed in four different units of measure: meters, miles, centimeters, and feet. 
Numericalunits doesn't support plurals for the units. The total_distance variable holds the total distance expressed in feet. import numericalunits as nu nu.reset_units('SI') distance1 = 2500 * nu.m distance2 = 2 * nu.mile distance3 = 3000 * nu.cm distance4 = 3500 * nu.foot total_distance = (distance1 + distance2 + distance3 + distance4) / nu.foot Pint: A Complete Package Pint is a Python package that allows you to work with numerical values associated to units of measure. Because Pint saves the magnitude and the associated units for any numerical type, you are able to know which unit you used when you assigned the magnitude value. You can perform arithmetic operations between compatible units and convert from and to different units. When you try to perform arithmetic operations on magnitudes that have incompatible units of measure, Pint raises a Pint.unit.DimensionalityError exception indicating that it cannot convert from one unit to the other before performing the arithmetic operation. The UnitRegistry class stores the definitions and relationships between units. By default, Pint uses the default_en.txt unit definitions file. This file contains the different units and the prefixes that the UnitRegistry will recognize in plain text. You can easily edit this text file to add any unit you might need to support.
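As a quick sketch of the Pint workflow described above (the unit spellings and the top-level DimensionalityError name follow Pint's current documentation and are assumptions on my part, not taken from this article):

import pint

ureg = pint.UnitRegistry()            # loads the default unit definitions file

r1 = 500 * ureg.ohm                   # magnitude and unit are stored together
r2 = 5.2 * ureg.kiloohm

total = (r1 + r2).to(ureg.ohm)        # compatible units are converted automatically
print(total)                          # 5700.0 ohm

try:
    bad = 10 * ureg.volt + 500 * ureg.ohm
except pint.DimensionalityError as err:
    print(err)                        # incompatible units raise DimensionalityError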
http://www.drdobbs.com/database/the-maximal-rectangle-problem/database/quantities-and-units-in-python/240161101
CC-MAIN-2017-30
en
refinedweb
package com.test; import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import android.app.Activity; import android.content.Context; import android.os.Bundle; import android.util.Log; import android.widget.Toast; public class MainActivity extends Activity { private static final String TAG = MainActivity.class.getName(); private static final String FILENAME = "myFile.txt"; /** Called when the activity is first created. */ @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.main); String textToSaveString = "Hello Android"; writeToFile(textToSaveString); String textFromFileString = readFromFile(); if ( textToSaveString.equals(textFromFileString) ) Toast.makeText(getApplicationContext(), "both string are equal", Toast.LENGTH_SHORT).show(); else Toast.makeText(getApplicationContext(), "there is a problem", Toast.LENGTH_SHORT).show(); } private void writeToFile(String data) { try { OutputStreamWriter outputStreamWriter = new OutputStreamWriter(openFileOutput(FILENAME, Context.MODE_PRIVATE)); outputStreamWriter.write(data); outputStreamWriter.close(); } catch (IOException e) { Log.e(TAG, "File write failed: " + e.toString()); } } private String readFromFile() { String ret = ""; try { InputStream inputStream = openFileInput(FILENAME);); } inputStream.close(); ret = stringBuilder.toString(); } } catch (FileNotFoundException e) { Log.e(TAG, "File not found: " + e.toString()); } catch (IOException e) { Log.e(TAG, "Can not read file: " + e.toString()); } return ret; } } Advertisements 32 thoughts on “Read/Write Text File/Data in Android example code” tnx but where you put the file in assets folder ? @gu nope. you don’t need to put any files in anywhere the problem is, you would like to write some data in a file and read it back if you use the above code, a file will be created in your app sandbox (other app does not have access to it) and you can read it from there Thx this helped me a lot! Only got one problem: When i try to read my data and send it to a TextView it only shows the latest string hope you can help me @Satanta, can you explain what are you trying to do and what is latest string? 
i have two activities: in the first, i want to write text from an EditText to a file like in your sample it looks like this: private OnClickListener btn=new OnClickListener() { public void onClick(View v){ String name = etWinner.getText().toString(); writeToFile(name); startActivity(new Intent(SubmitActivity.this, MainActivity.class)); } }; private void writeToFile(String data){ String newline=”\r\n”; try { OutputStreamWriter oswName = new OutputStreamWriter(openFileOutput(HIGHSCORE, Context.MODE_PRIVATE)); oswName.write(newline); oswName.write(data); oswName.close(); } catch (IOException e) { Log.e(TAG, “File write failed: ” + e.toString()); } } in the second activity i try to read the stored data and write it to a textView: protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_highscore); btn1 = (Button)findViewById(R.id.buttonBack); btn1.setOnClickListener(btn); String winners = readFromFile(); TextView tvNames=(TextView)findViewById(R.id.textViewNames); tvNames.setText(winners); } … private String readFromFile(){ String ret = “”; try { InputStream inputStream = openFileInput(HIGHSCORE);); } ret = stringBuilder.toString(); inputStream.close(); } } catch (FileNotFoundException e) { Log.e(TAG, “File not found: ” + e.toString()); } catch (IOException e) { Log.e(TAG, “Can not read file: ” + e.toString()); } return ret; } basically its just your code so i don’t get why it doesn’t work 😦 thx for the help Satanta edit: by latest string i mean it only shows the last data stored, not all the data like – let’s say String is “american pie”, it shows “american pie” if i store another string it just shows this one, probably overwrites “american pie” yes, previous data is going to be overwritten by the new data, thats how it supposed to work. if you want to save the data then you need to open the file in MODE_APPEND, not MODE_PRIVATE (line #40) For more information: openFileOutput Ahh okay! thx for the quick help! Tnx this is great but i have one question! J have 3 variables, a,b and c. I want to save them like 0 10 30 etc. (30 is max) and when open application again i want to read them again so b and c wouldnt be 0 they will be 10 and 30 and use that. How J can save them in that way and read them? tnx a lot! Hi thanks for your post..but after saving data into myFile.txt…i want see where that myFile.txt file is saved either sd card or in apps assets folder or some where else… It worked. Thanks @radha You could use DDMS to see it->choose your devide-> choose: data / data / (your package) / files / mytext.txt The inputstreamreader does not read my entire file. the size of the file is 15kb..Am struck..please help me out… My file data is a Json FILE check this out if you want to read write and delete using random access hi my question is how to access text document from mobile in android app I am doing Payment tracker project. My doubt is, if i am giving a number/value then i am pressing a button how to store that number/value in internal phone memory with a mark not send and when the internet is connected it will automatically send to the url given in program(that means post request)and the mark will change after the number/value is send basically it is about JSON Parsing. Can u Help me Plz! i want to know, where mytextfile.txt is stored Should i have to give any permission to store files.. I mean in manifest file? @shwetha No, You don’t need to provide any permissions. when i execute this i am not getting where file is saved. 
Can you help me by giving procedure to execute this? when i execute this its showing “hello world”. I guess file is not getting created.I am getting error in “R.layout.main”. please can you give a steps to execute this? how to check file present or not! or value=null? Hello, i’m so excited to look into your post. But I have a question. that program is about read/write file in txt. How about to read/write file in .doc ?? please help me, Mr. thankyou 🙂 Hi how do you load the text into a listview using simple adapter ? I want to append words from edit text one per line and save to text file … cat dog cow Than i want to load the text from the text file back as an array into listview ….Is that possible please help Please Answer… Where this file will be saved? If it is writting to already exist file, then how to make new file??? Please see the previous comment of Khắc Phục. Just use a different file name. Here I had used “myFile.txt”. Use a different one, and a new file will be created. No in the above path nothing is available Are you using real device? Emulator or Genymotion device might not work. real device LG E-970 Where would I create a file if I wanted to have a default file created in the /data/data//files directory on compile/installation? C programming details here I’ve been exploring for a little for any high-quality articles or blog posts on this sort of space . Exploring in Yahoo I ultimately stumbled upon this website. Reading this info So i’m satisfied to express that I have an incredibly just right uncanny feeling I came upon just what I needed. I most indubitably will make certain to don?t fail to remember this site and give it a look regularly. Good job it is nice tutorial, check out my code also
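For readers following along, here is a conventional way to read the private file written by writeToFile() back into a String; this is a sketch (same FILENAME constant, reading line by line with a BufferedReader), not the author's exact code:

private String readFromFile() {
    String ret = "";
    try {
        InputStream inputStream = openFileInput(FILENAME);
        if (inputStream != null) {
            InputStreamReader inputStreamReader = new InputStreamReader(inputStream);
            BufferedReader bufferedReader = new BufferedReader(inputStreamReader);
            StringBuilder stringBuilder = new StringBuilder();
            String receiveString;
            while ((receiveString = bufferedReader.readLine()) != null) {
                stringBuilder.append(receiveString);
            }
            inputStream.close();
            ret = stringBuilder.toString();
        }
    } catch (FileNotFoundException e) {
        Log.e(TAG, "File not found: " + e.toString());
    } catch (IOException e) {
        Log.e(TAG, "Can not read file: " + e.toString());
    }
    return ret;
}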
https://tausiq.wordpress.com/2012/06/16/readwrite-text-filedata-in-android-example-code/
CC-MAIN-2017-30
en
refinedweb
View Complete Post Hello guys, The scenario is I have multiple open browser window (IE or Firefox) then I want to close the specific browser individually. I have tried to lookup the applications running from the task manager but I can't get the running application from the task manager > application > task. I have added two references below to lookup the task running applications. using System.Diagnostics;using System.Threading; Anyone can help me on how to close individually the specific open browser window using c# code event. Thanks. I have a Dynamic Data .net 3.5 website. I am trying to have a popup display from the code behind. Below is the code. The calling form has an update panel surrounding the content. I get a 404 'resource cannot be found' when it runs. I have tried OpenNewWindow("ConfirmExam.aspx"); and OpenNewWindow("~Exams/ConfirmExam.aspx");. The ConfirmExam.aspx and the calling page are in the same folder called Exams. The calling page is a custom page iin /Custom Pages/Exams/Insert.aspx. The ConfirrmExam.aspx page has yes and no buttons. It executes code to go to a page based on the response. I am open to suggestions.//this is the method callOpenNewWindow("ConfirmExam.aspx");//this is the method definitionpublic void OpenNewWindow(string url) { ScriptManager.RegisterStartupScript(this.UpdatePanel1, typeof(string), "newWindow", String.Format("<script>window.open('{0}');</script>", url), false); } hi all, In Sharepoint 2010, how to adding dynamic watermark to word 2010 when opening? I thinking, may be just need configure in sharepoint 2010? or need sharepoint develop? What you guys think? Any thoughts are welcome! Thank you! Hi, I m working popup window , its working well in IE and Firefox. But it is not working well in safari browser. Please see my code snippet below, <script language="javascript" type="text/javascript"> function CallPrint1(url) { var popup = window.open('', 'popup', 'toolbar=no,menubar=no,height=490px,scrollbars=yes,resizable=yes'); popup.document.open(); popup.document.write(document.getElementById('printscript').innerHTML); popup.document.write(document.getElementById(url).innerHTML); popup.document.close(); return false; } </script> And in safari browser settings "Block pop-up windows" - if disable "Block pop-up windows" in safari browser, popup is working well , if i enable "Block pop-up windows" in safari browser, popup is not working well in safari browser i wants to display popup in enable SharePoint and Open XML: Generating Documents from SharePoint Using Open XML Content Controls WhiteMSDN Magazine October 2009; } ad FROM Hall of Fame Twitter Terms of Service Privacy Policy Contact Us Archives Tell A Friend
http://www.dotnetspark.com/links/15816-dynamic-content-modalpopupextender--open.aspx
CC-MAIN-2017-30
en
refinedweb
Posted 21 Jul 2016 Link to this post We noticed that when grouping on columns that sometimes have a null and sometimes have an empty string, it will create two different groupings. I tried to update the ViewModel in MVC so the property always returns an empty string: public class SampleViewModel { private string _name; [StringLength(50)] public string Name { get { return _name ?? string.Empty; } set { _name= value; } } } That still produces two different groupings. How can I get the grid to group these the same way? Posted 25 Jul 2016 Link to this post Posted 26 Jul 2016 Link to this post I was able to correct the issue. The ViewModel rules were too late in the process. ToDataSourceResult had already done the grouping. So I added a line before it and this works: var results = from s in Sample select new SampleViewModel { Name = s.Name ?? string.Empty }; return Json(results.ToDataSourceResult(request));
http://www.telerik.com/forums/grid-grouping---null-vs-empty-string
CC-MAIN-2017-30
en
refinedweb
Few years back I blogged about adding OpenID login support in ASP.NET application. This time I am blogging about adding Google login support in ASP.NET application. A friend of mine is trying to integrate multiple third party authentication support for one of the application he is developing for his client. He is using DotNetOpenAuth for Google authentication. the code I am using here is from my friend and I am sharing it with his explicit permission. First, download the latest version of DotNetOpenAuth and add its reference in your web application and these two namespaces. using DotNetOpenAuth.OpenId; using DotNetOpenAuth.OpenId.RelyingParty; After adding the reference, add a normal button with CommandArgument to point. <asp:Button On the button click event on the server side: protected void btnGoogleLogin_Click(object sender, CommandEventArgs e) { string discoveryUri = e.CommandArgument.ToString(); OpenIdRelyingParty openid = new OpenIdRelyingParty(); var URIbuilder = new UriBuilder(Request.Url) { Query = "" }; var req = openid.CreateRequest(discoveryUri, URIbuilder.Uri, URIbuilder.Uri); req.RedirectToProvider(); } Now when you click the button it will take you to Google login page which look something like this. You can see on the right side of the screen with the information of the site requesting the authentication. Once you get successfully authenticated with your entered user name and password, you will then redirect to the confirmation page: As I am using my local development server, you will see Locahost. Once you deploy the application in the production environment it will automatically get the name of the domain. Clicking on the Sign in button you will then be redirected to the main page, but before you get to the main page you need to check whether the authentication was successful or was it cancelled by the user or was failed. To make sure use the below code on the load event of the login page: protected void Page_Load(object sender, EventArgs e) { OpenIdRelyingParty rp = new OpenIdRelyingParty(); var response = rp.GetResponse(); if (response != null) { switch (response.Status) { case AuthenticationStatus.Authenticated: Session["GoogleIdentifier"] = response.ClaimedIdentifier.ToString(); Response.Redirect("Default.aspx"); break; case AuthenticationStatus.Canceled: Session["GoogleIdentifier"] = "Cancelled."; break; case AuthenticationStatus.Failed: Session["GoogleIdentifier"] = "Login Failed."; break; } } } On Default.aspx page I have set the ClaimedIdentifier: The response/status returned from Google will be checked here and we will redirect the application to work the way accordingly. My friend sends me the above code to find out whether there is any way we can logout from the service. Well, unfortunately there isn't any specific way to log out using DotNetOpenAuth? But there is a workaround. I don't know if it is a good practice or bad but it worked out for me. To logout, I am just calling this logout URL used by Google. If you have some suggestions or you know a better way or approach of doing this then please drop a line in the comments sections. {{ parent.title || parent.header.title}} {{ parent.tldr }} {{ parent.linkDescription }}{{ parent.urlSource.name }}
https://dzone.com/articles/implementing-google-account
CC-MAIN-2017-30
en
refinedweb
Details "To approximate the square root of a positive number n using Newton's method, you need to make an initial guess at the root and then refine this guess until it is "close enough." Your first initial approximation should be root = 1;. A sequence of approximations is generated by computing the average of root and n/root. Use the constant: private static final double EPSILON = .00001; Your loop for findSquareRoot should behave like this: make the initial guess for root while ( EPSILON < absolute value of the difference between root squared and n ) calculate a new root return root Your class should have a constructor that takes the number you want to find the square root of. Implement the usual accessor method(s) and a findSquareRoot method that uses Newton's method described above to find and return the square root of the number. Add a method setNumber that takes a new number that replaces the number in the instance field. Supply a toString method that returns a string containing the number and the square root of the number. " And here's what I have.... public class NewtonsSquareRoot{ private static final double EPSILON = .00001; private int myNumber; private double root; private double guess; public NewtonsSquareRoot(int number){ myNumber = number; } public int getNumber(){ return myNumber; } public double findSquareRoot(){ guess = 1; root = Math.sqrt(myNumber); while (EPSILON < Math.abs(Math.pow(root, 2) - myNumber)) { guess++; } return root; } public void setNumber(int number){ myNumber = number; } public String toString(){ String s = new String(); s = "The square root of " + myNumber + " is " + root + "."; return s; } } Thanks in advance for helping
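Not a fix of the posted class, just a sketch of the loop the assignment describes, reusing the EPSILON constant and the myNumber field from the post; each pass replaces root with the average of root and n/root until root squared is within EPSILON of n:

public double findSquareRoot() {
    double root = 1;                                    // initial guess
    while (EPSILON < Math.abs(root * root - myNumber)) {
        root = (root + myNumber / root) / 2;            // average of root and n/root
    }
    return root;
}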
http://www.dreamincode.net/forums/topic/73357-finding-the-square-root-with-a-while-loop/
CC-MAIN-2017-30
en
refinedweb
Installation problem Hello, I have some problems while installing the package. First of all, I'm on Ubuntu 10.04 64-bit, Python v2.6.5, Cython v0.14.1. When I try: {{{ python setup.py install }}} it complains about not finding the Newton library, so I renamed "libnewton.a" to "libNewton.a" (notice the capital letter). Then I only get a warning: {{{ cc1plus: warning: command line option "-Wstrict-prototypes" is valid for Ada/C/ObjC but not for C++ }}} But the problem appears when I import the package in a Python shell: {{{ import newton Traceback (most recent call last): File "<stdin>", line 1, in <module> ImportError: /usr/local/lib/python2.6/dist-packages/newton.so: undefined symbol: NewtonBodySetVelocity }}} I don't know what else to do. Please help us! Thanks in advance! Well, I hadn't tried with the latest Newton source (v2.33). So I downloaded it, compiled it and copied the generated "libNewton.a" for my platform (linux64) to newpy's folder. Then it works! The problem is fixed. The original "libnewton.a" included is obsolete, or needs to be updated.
https://bitbucket.org/r1chardj0n3s/newpy/issues/1/installation-problem
CC-MAIN-2017-30
en
refinedweb
Re: Multiple Namespaces - Ok, Byrne helped me out with a link to this site: But this question still worries me:
>> when i receive such a structure from a client, my SOAP::Lite Server
>> comes with a message like this: Application failed during request deserialization: Unrecognized
>> type '{urn:catalog}CatalogID'
>> i think this is because the server does not know about the xsd that
>> belongs to the namespace 'urn:catalog' ... how can i tell the server
>> where the according xsd is located?
https://groups.yahoo.com/neo/groups/soaplite/conversations/topics/2576?o=1&m=e&var=1&tidx=1
CC-MAIN-2017-30
en
refinedweb
How to display images from folder dynamically into web page using ASP.NET? In this article I will be explaining you a simple way to show and rotate images in the web page without using AJAX, Ad rotator, Jquery etc. Description In many times we are need to display images in the web page from folder and display that image like slide show (advertisement). If you create slideshow using ad rotator you need to mention path in XML for each image that is static one. But using this code snippet no need to create XML file or any file to load images in the webpage. Just only one time mention path of the folder in the source code. This code snippet is helps you to make slide show (like DNS awards winner list slide show) from your folder image. If you want add any image in your slide show then no need to alter code just place that image in that rooted path it's enough automatically image will appear in web page. For example, Here in my code snippet I have placed images in the folder name like "uploads" that is placed in the root path. And I have create one user control page (Image.ascx) to write code in that code behind and then I drag and drop that custom control in webpage Default.aspx to make slide show in my web page. Full Source Code I have design page in the user control using image control like below Client Side: Image.ascx <%@ Control Language="C#" AutoEventWireup="true" CodeFile="Image.ascx.cs" Inherits="Image" %> <asp:ScriptManager </asp:ScriptManager> <asp:UpdatePanel <ContentTemplate> <asp:Image <br /> <asp:Timer </asp:Timer> </ContentTemplate> </asp:UpdatePanel> Code Behind: Image.ascx.cs using System.IO; using System.Collections; public partial class Image : System.Web.UI.UserControl { //just provide here your image folder path nothing to change anywhere else string store_folder = HttpContext.Current.Server.MapPath("~/uploads"); protected void Page_Load(object sender, EventArgs e) { //Initally assign image count as zero int count = 0; if (!Page.IsPostBack) { ArrayList slide = new ArrayList(); System.IO.DirectoryInfo inputDir = new System.IO.DirectoryInfo(store_folder); //Get each file in the specified folder store in the array list foreach (FileInfo eachfile in inputDir.GetFiles()) { slide.Add(eachfile.ToString()); count += 1; } //store the total file count in the session Session["count"] = count; //store the total images name arraylist in the session Session["images"] = slide; Timer1_Tick(this, new EventArgs()); } } protected void Timer1_Tick(object sender, EventArgs e) { ArrayList slide = new ArrayList(); //Get the image name from the session assign back in the arraylist slide = (ArrayList)Session["images"]; if ((Session["current"] != null) && (Convert.ToInt32(Session["current"]) != Convert.ToInt32(Session["count"]) - 1)) { //If already image is display in control then assign next image when post back occur Image1.ImageUrl = "~\\uploads\\" + slide[Convert.ToInt32(Session["current"]) + 1].ToString(); Session["current"] = Convert.ToInt32(Session["current"]) + 1; } else if (Session["current"] == Session["count"]) { //If that displayed image is last image then start again from beginning to display in image control Image1.ImageUrl = "~\\uploads\\" + slide[0].ToString(); Session["current"] = 0; } else { if (Convert.ToInt32(Session["count"]) != 0) { //Initally load the first image in the image control Image1.ImageUrl = "~\\uploads\\" + slide[0].ToString(); Session["current"] = 0; } } } } Default.aspx <h3> Display Images from folder to web page and Change automatically certain period of 
time</h3> <uc1:Image Output: The output of the above code looks like the image below. Source Code: I have attached the full source code for the same; download it and test it. Client Side: ASP.NET. Code Behind: C#. Conclusion: I hope this article helps you create a slide show in a simple way by retrieving images from a folder. Thanks. ..That's what I was looking for
http://www.dotnetspider.com/resources/44432-How-display-dynamic-images-from-folder-into-web-page.aspx
CC-MAIN-2017-30
en
refinedweb
Enabling Logging Programmatically The run-time engine provides a collection of LogProvider objects that enable event-specific information to be captured during package validation and execution. LogProvider objects are available to DtsContainer objects, including the TaskHost, Package, ForLoop, and ForEachLoop objects. Logging is enabled on individual containers, or on the whole package. There are several types of log providers that are available for a container to use. This provides the flexibility to create and store log information in many formats. Enlisting a container object in logging is a two-step process: first logging is enabled, and then a log provider is selected. The LoggingOptions and LoggingMode properties of the container are used to specify the logged events and to select the log provider. The LoggingMode property, found in each container that can perform logging, determines whether the container's event information is recorded to the event log. This property is assigned a value from the DTSLoggingMode structure, and is inherited from the container's parent by default. If the container is a package, and therefore has no parent, the property uses the UseParentSetting, which defaults to Disabled. Selecting a Log Provider After the LoggingMode property is set to Enabled, a log provider is added to the SelectedLogProviders collection of the container to complete the process. The SelectedLogProviders collection is available on the LoggingOptions object, and contains the log providers selected for the container. The Add method is called to create a provider and add it to the collection. The method then returns the log provider that was added to the collection. Each provider has configuration settings that are unique to that provider, and these properties are set using the ConfigString property. The following table lists the available log providers, their description, and their ConfigString information. Events are included in or excluded from the event log by setting the EventFilterKind and the EventFilter properties of the container. The EventFilterKind structure contains two values, ExclusionFilter and InclusionFilter, that indicate whether the events that are added to the EventFilter are included in the event log. The EventFilter property is then assigned a string array that contains the names of the events that are the subject of the filtering. The following code enables logging on a package, adds the log provider for Text files to the SelectedLogProviders collection, and specifies a list of events to include in the logging output. using System; using Microsoft.SqlServer.Dts.Runtime; namespace Microsoft.SqlServer.Dts.Samples { class Program { static void Main(string[] args) { Package p = new Package(); ConnectionManager loggingConnection = p.Connections.Add("FILE"); loggingConnection.ConnectionString = @"C:\SSISPackageLog.txt"; LogProvider provider = p.LogProviders.Add("DTS.LogProviderTextFile.2"); provider.ConfigString = loggingConnection.Name; p.LoggingOptions.SelectedLogProviders.Add(provider); p.LoggingOptions.EventFilterKind = DTSEventFilterKind.Inclusion; p.LoggingOptions.EventFilter = new String[] { "OnPreExecute", "OnPostExecute", "OnError", "OnWarning", "OnInformation" }; p.LoggingMode = DTSLoggingMode.Enabled; // Add tasks and other objects to the package. } } }
https://technet.microsoft.com/en-us/library/ms136023(v=sql.105).aspx
CC-MAIN-2017-30
en
refinedweb
(Intro: Sometimes as part of testing I go to silly lengths to try to point out what I think is going to be a bug in a piece of code I have seen. And frequently when I do this, I am basically wrong, for any of various reasons – it’s a bug according to some world view which is not quite aligned with reality, or whatever. Overall I think I tend to frustrate some people when I go through this exercise, but an end result is often that I learn something...) I just emerged from the testing vaults with a lost feature to share. In VS 2010 there wasn’t a good customization story for the workflow designer in Visual Studio. What do I mean customization story? Let me try to think of some motivating examples. Example #1: You want to customize Workflow Designer inside VS, by registering a new custom property editor for a system or custom Type. For sake of example e.g. Nullable<T>. Example #2 You want VS to provide a new Service to your custom designers via EditingContext.Services – but the service will actually be hosted in Visual Studio’s main appdomain, and its data will persist throughout a VS session, and be shared with all WorkflowDesigner instances. These scenarios are not supported in VS 2010. However, what I found out so far is that Example #1 and Example #2 appear to be supported in VS 2012. Now I’m calling this a lost feature because I couldn’t find any official docs for this, so these are what I picked up from talking to developer Tony, and disassembling certain assemblies. C:\Program Files (x86)\Microsoft Visual Studio 11.0\Common7\IDE\Extensions\WorkflowDesigner Tony also sent me an example code which is just an ordinary C# class with a MEF exported interface: [Export(typeof(IEntityInfoService))] // note: IEntityInfoService - this is just some custom type for example, what type really doesn’t matter Aside from this, his sample had two features which let me know the class might be hosted in the main VS app domain: 1) it imports DTE, which is the core Visual Studio programming interface. A DTE object gives you access to many things in VS such as the VS solution tree [Import] public DTE Dte { get; set; } 2) it inherits MarshalByRefObject – this means we can hold object references to the object from another domain, and that it will inherit stuff you need for .Net remoting lifetime management: GetLifetimeService, etc. Another interesting find, while trying to understand the tip In Microsoft.VisualStudio.Activities.AddIn.WorkflowDesignerAddIn, there is code which uses an internal class (AddInWorkflowDesignerExtensionManager) to discover extensions which implement IRegisterMetadata. You know, that interface for registering custom designer stuff. For each such extension found, the designer add-in calls Register() straight away. Last find Further in the ‘how it might work’ category – there’s another internal class that VS uses called WorkflowDesignerExtensionManager, which appears to be there for allowing Services (as in EditingContext.Services) to be discovered by the WorkflowDesigner or custom activity designers. It leverages the MEF API (and DirectoryCatalog in particular) to discover extensions from the directory Tony mentioned! Note, I heard a long time back that MEF was also planned to be key part of the extensibility story for Visual Studio 2012… looks like this sort of fits into that strategy. Trying stuff… Pieces of the puzzle are falling into place nicely. So it’s time to try some stuff out! 
1) Create a custom dll, implement and Export (MEF) the IRegisterMetadata interface, and see if I can get called by Visual Studio. I’m guessing I don’t need MarshalByRefObject in this scenario, since the MetadataStore is generally in the WorkflowDesigner’s AppDomain. 2) Create a custom dll, implement and Export a custom interface, IFooService to a MarshalByRef object. Get and call the service from a custom activity designer, which will be loaded in one of the usual ways. Since Visual Studio 2010 doesn’t (in my experience) do a very good job of debugging Visual Studio 2012, I’ll be creating my test projects in Visual Studio 2012. namespace MetadataExtension { [Export(typeof(IRegisterMetadata))] internal class RegisterMetadata : IRegisterMetadata { public RegisterMetadata() { } public void Register() MessageBox.Show("Hello... and goodbye sequence designer"); var builder = new AttributeTableBuilder(); builder.AddCustomAttributes(typeof(Sequence), new DesignerAttribute(typeof(ActivityDesigner))); MetadataStore.AddAttributeTable(builder.CreateTable()); } } I copy the debug output dll to C:\Program Files (x86)\Microsoft Visual Studio 11.0\Common7\IDE\Extensions\WorkflowDesigner, and launch a new instance of Visual Studio 2012. I create a new workflow activity library project, and … success! I have just succeeded in pranking myself – the Sequence activity is now totally unusable: Of course while you could use this trick to prank your WF-using office mates, I don’t recommend it. We’d better think of some more constructive use for that feature. Here’s the call stack when the metadata extension gets called. METADATAEXTENSION.dll!MetadataExtension.RegisterMetadata.Register() Line 24 C# Microsoft.VisualStudio.Activities.Addin.dll!Microsoft.VisualStudio.Activities.AddIn.WorkflowDesignerAddIn.ExecuteRegisterMetadataExtensions() + 0x82 bytes Microsoft.VisualStudio.Activities.AddinAdapter.dll!Microsoft.VisualStudio.Activities.AddInAdapter.IDesignerContractToViewAddInAdapter.ExecuteRegisterMetadataExtensions() + 0xc bytes mscorlib.dll!System.Runtime.Remoting.Messaging.StackBuilderSink.SyncProcessMessage(System.Runtime.Remoting.Messaging.IMessage msg) + 0x1e7 bytes […more remoting stuff…] mscorlib.dll!System.Runtime.Remoting.Proxies.RemotingProxy.InternalInvoke(System.Runtime.Remoting.Messaging.IMethodCallMessage reqMcmMsg, bool useDispatchMessage, int callType) + 0x1cc bytes mscorlib.dll!System.Runtime.Remoting.Proxies.RemotingProxy.Invoke(System.Runtime.Remoting.Messaging.IMessage reqMsg) + 0x66 bytes mscorlib.dll!System.Runtime.Remoting.Proxies.RealProxy.PrivateInvoke(ref System.Runtime.Remoting.Proxies.MessageData msgData, int type) + 0xea bytes Microsoft.VisualStudio.Activities.HostAdapter.dll!Microsoft.VisualStudio.Activities.HostAdapter.IDesignerViewToContractHostAdapter.ExecuteRegisterMetadataExtensions() + 0xc bytes Microsoft.VisualStudio.Activities.dll!Microsoft.VisualStudio.Activities.EditorPane.CreateWorkflowDesignerInIsolatedMode() + 0x2cb bytes Microsoft.VisualStudio.Activities.dll!Microsoft.VisualStudio.Activities.EditorPane.Microsoft.VisualStudio.Shell.Interop.IPersistFileFormat.Load(string fileName, uint formatMode, int readOnly) + 0xda bytes Microsoft.VisualStudio.Activities.dll!Microsoft.VisualStudio.Activities.EditorPane.Microsoft.VisualStudio.Shell.Interop.IVsPersistDocData.LoadDocData(string documentName) + 0xd bytes From this stack we can notice that 1) our RegisterMetadata extension being called is very early indeed in the process of creating and setting up the Workflow Designer 2) 
it’s triggered by Visual Studio which is specifically calling for such extensions to be loaded, so it doesn’t apply for rehosted apps. Which is fine. In a rehosted app we already are able to control and hook into the workflow designer creation process. The second example is just slightly more work. Part #1: Define a contract public interface ICustomContract void Hello(); Part #2: Define an extension [Export(typeof(ICustomContract))] public class CustomContractExtension : MarshalByRefObject, ICustomContract public CustomContractExtension() public void Hello() MessageBox.Show("Hello"); MessageBox.Show("Got DTE? " + (this.Dte != null)); [Import] public DTE Dte { get; set; } Now we can place our extension dll in the Extensions\WorkflowDesigner directory, fire up a new instance of VS 2012, and see if anything happens. This time, nothing happens when we create a new class library. We need a couple more steps. 1) Add a new code activity 2) Add a new activity designer 3) Associate them (today I’ll do it the quickest way, using DesignerAttribute) [Designer(typeof(ActivityDesigner1))] public sealed class CodeActivity1 : CodeActivity protected override void Execute(CodeActivityContext context) 4) Add a reference to the DLL where we defined ICustomContract – we can browse it from the Extensions folder 5) Modify our activity designer slightly, in the .xaml.cs file: public partial class ActivityDesigner1 public ActivityDesigner1() InitializeComponent(); protected override void OnModelItemChanged(object newItem) base.OnModelItemChanged(newItem); if (newItem is ModelItem) { var cc = (newItem as ModelItem).GetEditingContext().Services.GetService<ICustomContract>(); if (cc != null) { cc.Hello(); } } The important point here is that we will call EditingContext.Services.GetService<ICustomContract>() in order to get the custom service created above. When we try it out, we will see our popup dialogs: Interesting and important to note - if you set breakpoints in the constructor of CustomContractExtension, then you will see that it is being created lazily, upon demand. Also interesting and important to note – the CustomContractExtension is indeed created in the main Visual Studio App Domain. So calls to it will work calling via .Net Remoting. Now it happens that there is no reference to that object in the main VS AppDomain, which leaves it subject to potential garbage collection. Ideally WorkflowDesigner or the EditingContext would create a remoting Sponsor in order to extend the life time of the object to match the lifetime of the EditingContext.Services object. However, in practice this does not happen. Leaving Visual Studio for idle for a few minutes, you can come back, try to use your custom activity again, and get errors such as: Unfortunately I haven’t figured out the right way to stop Garbage collection blowing this up yet, but I think there are a couple approaches that could work: 1) sponsorship as mentioned above, 2) forced reachability to the service itself by ensuring references from a GC root in its home app domain.
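A third option besides the two above, offered only as a sketch of a general .NET remoting technique (I have not verified it against the Visual Studio designer host): give the extension an infinite lease by overriding InitializeLifetimeService, so the remoting layer never disconnects the object:

[Export(typeof(ICustomContract))]
public class CustomContractExtension : MarshalByRefObject, ICustomContract
{
    // Returning null means "no lease": the cross-AppDomain proxy to this object
    // stays valid for the lifetime of the process instead of expiring after idle time.
    public override object InitializeLifetimeService()
    {
        return null;
    }

    public void Hello()
    {
        MessageBox.Show("Hello");
    }
}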
http://blogs.msdn.com/b/tilovell/archive/2012/10/04/wf4-vs-workflowdesigner-extensions-in-visual-studio-2012.aspx
CC-MAIN-2014-42
en
refinedweb
commands = fileToString("myPath/myCode.prg") executeString(commands)Wait, if you use function pointers instead of a Command object (with do and undo methods), don't you lose the undo feature, which is the main concrete feature touted above? Undo is not a feature of the Command pattern, it is simply one of the application features you could implement using it. Using single-argument function pointers in a language like C, you could use an array of function-pointer/argument pairs as your undo stack. public class ViewCartCommand{ private RowSet rs; private int _cartId; public void setCartId(int cartId){ _cartId = cartId; } public void execute() throws DataCommandException{ Connection conn = null; PreparedStatement stat = null; try{ conn = ConnectionPool.getConnection(); stat = conn.prepareStatement(query); stat.setInt(1, _cartId); rs.populate(stat.executeQuery()); rs.beforeFirst(); }catch(SQLException e){ throw new DataCommandException(e); }finally{ try{ if(stat != null) stat.close(); if(conn != null) conn.close(); }catch(SQLException err){ throw new DataCommandException("failed to close db resources", err); } } } public String getProductName(){ return rs.getString("product_name"); } // etc }The advantage of this approach compared to O/R mapping solutions (including DataAccessObjectPattern) is that you don't have to navigate an object graph or create custom value objects. For more information check out -- Cameron Zemek ([email protected])
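As a sketch of the function-pointer variant mentioned above (the names, the single int argument and the fixed-size stack are all illustrative choices, not part of the original discussion):

#include <stdio.h>

typedef void (*undo_fn)(int);                /* a single-argument "command" */

struct undo_entry { undo_fn fn; int arg; };

static struct undo_entry undo_stack[64];
static int undo_top = 0;

static void push_undo(undo_fn fn, int arg)
{
    if (undo_top < 64) {
        undo_stack[undo_top].fn = fn;        /* record how to reverse the change */
        undo_stack[undo_top].arg = arg;
        undo_top++;
    }
}

static void undo_last(void)
{
    if (undo_top > 0) {
        struct undo_entry e = undo_stack[--undo_top];
        e.fn(e.arg);                         /* run the stored undo action */
    }
}

static void restore_quantity(int old_value)  /* example undo action */
{
    printf("quantity restored to %d\n", old_value);
}

int main(void)
{
    push_undo(restore_quantity, 3);
    undo_last();                             /* prints: quantity restored to 3 */
    return 0;
}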
http://c2.com/cgi/wiki?CommandPattern
CC-MAIN-2014-42
en
refinedweb
Opened 4 years ago Closed 21 months ago #4947 enhancement closed wontfix (wontfix) Routes dispatching in twisted.web.server Description (last modified by tom.prince) It would be nice to have an option to use Routes-based dispatching in twisted.web.server. Something like: from twisted.web.server import Site class Controller(object): def index(self, request): return '<html><body>Hello World!</body></html>' c = Controller() dispatcher = Dispatcher() dispatcher.connect(name='index', route='/', controller=c, action='index') factory = Site(dispatcher) You would then also be able to do stuff like this: from twisted.web.static import File dispatcher.putChild('static', File(static_path)) We could adapt something like; we're using this for several internal webservices at work and it's quite nice to program in. : ) Change History (6) comment:1 Changed 4 years ago by DefaultCC Plugin - Cc jknight added comment:2 Changed 4 years ago by glyph comment:3 Changed 4 years ago by glyph Would you mind renaming 'twistedroutes' to 'txroutes' until we include it in the core? comment:4 Changed 4 years ago by steiza Not at all, you can now find it here: comment:5 Changed 22 months ago by tom.prince is also related. I think given the diversity of implementations, this is perhaps something that should live outside twisted proper. comment:6 Changed 21 months ago by exarkun - Resolution set to wontfix - Status changed from new to closed Such an API could be part of Twisted, if anyone demonstrates they have a good implementation that a lot of people were happy with, and which many people would benefit from having included in Twisted (simplified deployment story or some implementation enhancement which is only possible by maintaining the code in lockstep with the rest of Twisted Web). Otherwise there doesn't seem to be much problem caused by distributing these things separately. Please feel free to re-open in case something qualifies according to my first paragraph. Fixing markup
http://twistedmatrix.com/trac/ticket/4947
CC-MAIN-2014-42
en
refinedweb
02 April 2012 09:32 [Source: ICIS news] SINGAPORE (ICIS)--The plants will include a two-stream hydrogen/carbon monoxide (HyCO) facility, a single-stream ammonia plant and an ammonia storage tank, Linde said. The industrial gases firm will invest around $380m (€285m) in the project, with the production units expected to be ready in 2015, it said. Sadara will use the carbon monoxide, hydrogen and ammonia primarily for the production of aromatics, isocyanates, amines and hydrogen peroxide, the German industrial gases firm added. Sadara is a joint venture between Saudi Aramco and US company
http://www.icis.com/Articles/2012/04/02/9546731/linde-to-build-on-site-plants-for-sadaras-saudi-chemical.html
CC-MAIN-2014-42
en
refinedweb
ResourceState
Since: BlackBerry 10.0.0
#include <bb/cascades/ResourceState>
Exposes resource state enum to QML.
Public Types
Definitions of the resource states (each available since BlackBerry 10.0.0):
- Unknown: The state is not known.
- Loading: The resource is being loaded (for example, it's downloading or decoding, etc.).
- Loaded: The resource is loaded and is ready to be used.
- ErrorNotFound: The resource is not found (for example, the given path is invalid).
- ErrorInvalidFormat: The resource is found but could not be recognized (for example, the data is corrupt or has an unhandled format).
- ErrorMemory: There's not enough memory to decode the resource.
http://developer.blackberry.com/native/reference/cascades/bb__cascades__resourcestate.html
CC-MAIN-2014-42
en
refinedweb
While developing WCF client/services, I frequently encounter this annoying error whenever I run my client to connect to the service contract for the first time. I term them as "timewasters". This post will partly serve as a reminder to me, and hopefully someone will benefit from it if they came across the same problem. The story goes like this, you start up your usual Visual Studio 2005 to work on a simple WCF's application (you know the usual service <-> client stuffs). So you created your service and named your interface IContact with a namespace called Contact namespace Contact{ [ServiceContract] public interface IContact { [OperationContract] void Something(); }...} You then go on to create your configuration and service file, opened up your IIS, create a virtual directory and dumped the appropriate files into the virtual directory. You then test the directory from your Internet Explorer. Everything works beautifully. So now you do the easy part. Fire up the SDK command prompt and use the "svcutil" command to create the proxy needed for connection to the service. You create a client project, add the auto-generated proxy and output.config file and start to consume the service via the proxy you've just created. After all is done, you do a run and this came staring at you So what's wrong? It's all spelled out in the error description actually. The resolution is pretty simple, here's something you can take note so that this error message will be gone for good I guess if someone ran into the same problem again as I do, they might benefit from this post.
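For reference, the svcutil step mentioned above is usually a single command of roughly this shape; the service address and file names here are placeholders rather than the ones from this post:

svcutil http://localhost/Contact/Service.svc?wsdl /out:ContactProxy.cs /config:output.config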
http://geekswithblogs.net/nestor/archive/2007/01/05/102828.aspx
CC-MAIN-2014-42
en
refinedweb
UK High Altitude Society A few matlab scripts for attitude and hopefully (at some point) position estimation using MEMS sensors. The data used for the tests was kindly recorded by a member of the sparkfun forum with a sparkfun 6DOF IMU. Ascii data is here. The format is time (seconds), roll, pitch and yaw gyro rates, and x,y,z accel readings in g (roughly - obviously gain and bias need to be applied). It is possible to add three gyro biases to the state vector; this can give improved results even without a magnetometer to correct yaw. This is the pure data from the IMU - read the code to find the derived bias and gain values for the sensors. The quaternion state vector was converted to elevations relative to the artificial horizon - the data you would actually need to input into the PID loop(s) controlling the airframe. The "glitch" at approx 51 seconds is due to the fact that the IMU is effectively in a "dive", so the yaw becomes relatively ambiguous - a bit of noise is enough to force the projection of the end of the body's x axis on the xy plane (found as part of the calculation process) into the opposite xy plane quadrant and cause a heading shift of pi. This is a real effect - the ground track of an aircraft in this situation really would reverse direction, i.e. change by pi radians - think of a plane in a vertical loop. The horizon elevations between motions are less than +-2 degrees, which is well within the limits of the experiment (the IMU was handheld). Also interesting is the low drift of the "Yaw" (more correctly a heading in this graph), despite the fact that there is no magnetometer to provide a second fix vector. This simply uses guesstimated accelerometer biases and gains, and was calculated independently from the filter, so is very suboptimal. However, the IMU can be seen to be lifted off the table and placed back down. The large sideways drifts are due to inaccurate gain and bias values preventing g from being cancelled out correctly, and also slightly decreasing the absolute accuracy of the attitude solution during sharp turns. Nevertheless, the position is quite stable over a period of 1.5 minutes - quite impressive for MEMS. The second plot shows the results with velocity decaying at approx exp(-0.3t), where t is time in seconds. This requires an ascii input file, see the description at the top of the page.

function quat_IMU_simple(beta,alpha,q)
%constants---------------------------------------
%beta=0.000003; %alpha=0.0001; %q=0.04 are sensible values
%note: uses Octave syntax (endif/endfor, -=, fflush)
gyro_null=[0.0183;0.0101;0.0063];
gyro_gain=[1/1.3096,0,0;0,1/1.41875,0;0,0,1/1.54607];
delta_time=0.02857; %35 Hz sampling in this data set
%read in the data - each data type is in a separate row
fp=fopen('rpy90bothways.TXT','r');
data=fscanf(fp,'%f %f %f %f %f %f %f %f %f %f',[10,inf]);
fclose(fp);
j=size(data);
j=j(1,2);
x_nav=[0;0;0];
v_nav=[0;0;0];
%setup Kalman filter
x=[1;0;0;0];
P=[pi^2,0,0,0;0,pi^2,0,0;0,0,pi^2,0;0,0,0,pi^2];
Q=(q/35)*[1,0,0,0;0,1,0,0;0,0,1,0;0,0,0,1];
%main loop---------------------------------------
for n=1:j;
  %takes vectors g and a as inputs for gyros and accels, g needs to be
  %premultiplied by delta time per run.
  g=gyro_gain*(data(2:4,n)-gyro_null)*delta_time;
  %get the accel vector
  a=data(5:7,n);
  %a=-a for some of the sensors on the dataset;
  a(2)=-a(2);
  a(1)=-a(1);
  %normalise acceleration vector
  a=a/sqrt(a(1)^2+a(2)^2+a(3)^2);
  %accel sensor measurement jacobian
  H=[2*x(3),2*x(4),2*x(1),2*x(2);-2*x(2),-2*x(1),2*x(4),2*x(3);2*x(1),-2*x(2),-2*x(3),2*x(4)];
  %misalignment error jacobian
  F=[1,-g(1)/2,-g(2)/2,-g(3)/2;g(1)/2,1,g(3)/2,-g(2)/2;g(2)/2,-g(3)/2,1,g(1)/2;g(3)/2,g(2)/2,-g(1)/2,1];
  %input error jacobian
  E=0.5*[-x(2),-x(3),-x(4);x(1),-x(4),x(3);x(4),x(1),-x(2);x(3),x(2),x(1)];
  %propagate the state vector
  x=0.5*[2,-g(1),-g(2),-g(3);g(1),2,g(3),-g(2);g(2),-g(3),2,g(1);g(3),g(2),-g(1),2]*x;
  %normalise the quaternion
  x=x/sqrt(x(1)^2+x(2)^2+x(3)^2+x(4)^2);
  %propagate the covariance, can also use P+=, and subtract identity from F
  P=F*P*F'+E*E'*beta+Q;
  %find the residual
  y=a-[2*(x(2)*x(4)+x(1)*x(3));2*(x(3)*x(4)-x(1)*x(2));x(1)^2-x(2)^2-x(3)^2+x(4)^2];
  fflush(stdout);
  %alpha is accel noise
  S=H*P*H'+alpha*[1,0,0;0,1,0;0,0,1];
  %find gain
  K=P*H'*S^(-1);
  %state update
  x=x+(K*y);
  %covariance update
  P=P-K*H*P;
  %normalise quaternion
  x=x/sqrt(x(1)^2+x(2)^2+x(3)^2+x(4)^2);
  %work out the roll pitch and yaw (these are NOT EULER ANGLES)
  roll(n)=asin(2*x(1)*x(2)+2*x(3)*x(4));
  trans_x=[x(1)^2+x(2)^2-x(3)^2-x(4)^2;2*x(1)*x(4)+2*x(2)*x(3);2*x(2)*x(4)-2*x(1)*x(3)];
  pitch(n)=asin(trans_x(3));
  z_x=2*(x(2)*x(4)+x(1)*x(3)); %projection of z axis onto x axis
  z_z=x(1)^2-x(2)^2-x(3)^2+x(4)^2; %projection of z axis onto z axis
  p_z=z_x/norm([z_z;z_x]); %the body x axis rotated so as to have 0 y component
  p_x=sqrt(1-p_z^2);
  if(z_z>0) %sign ambiguity
    p_x=-p_x;
  endif
  yaw(n)=asin(norm(cross(trans_x,[p_x;0;p_z]))); %asin the length to get the rotation angle
  if trans_x(2)<0 %account for ambiguity in sign caused by taking norm - check sign of y co-ord of transformed x
    yaw(n)=-yaw(n);
  endif
  if trans_x(1)<0 %mapping of x onto x is negative
    yaw(n)=pi-yaw(n); %account for the ambiguity in asin
    if(yaw(n)>pi) %stay in range +-pi
      yaw(n)-=2*pi;
    endif
  endif
  if z_z<0 %this is for the case where we are inverted - the body z axis is in the -ive z direction so reverse yaw
    yaw(n)=-yaw(n);
  endif
endfor
plot(data(1,1:j),roll(1:j),"r",data(1,1:j),pitch(1:j),"g",data(1,1:j),yaw(1:j),"b");
h = legend('Roll','Pitch','Yaw',3);
set(h,'Interpreter','none');
endfunction
http://ukhas.org.uk/code:4_state_extended_kalman_filter_in_matlab
CC-MAIN-2014-42
en
refinedweb
11 February 2011 20:25 [Source: ICIS news] HOUSTON (ICIS)--US melamine imports for 2010 were up 65% from 2009, according to data released by the US International Trade Commission (ITC) on Friday. US 2010 melamine imports rose by 65% to 31,780 tonnes from 19,259 tonnes in 2009. Imports rose last year as a result of increased demand in [...]. Imports in December rose 88% to 1,727 tonnes from 919 tonnes in December 2009, the ITC said. December imports fell by 34% from 2,621 tonnes in November 2010. The biggest exporters of melamine to the US were [...]. US melamine exports for 2010 were up 50% from 2009. Melamine exports rose by 50% to 33,914 tonnes from 22,585 tonnes in 2009. Major destinations [...]
http://www.icis.com/Articles/2011/02/11/9434837/us-2010-melamine-imports-rise-65-from-2009-itc.html
CC-MAIN-2014-42
en
refinedweb
Aa codes From PyMOLWiki

Just a quick little script to allow you to convert from 1-to-3 letter codes and 3-to-1 letter codes in PyMOL. Copy the code below and drop it into your .pymolrc file. Then, each time you load PyMOL, "one_letter" and "three_letter" will be defined.

The Code

Simple

one_letter = {'VAL':'V', 'ILE':'I', 'LEU':'L', 'GLU':'E', 'GLN':'Q', \
'ASP':'D', 'ASN':'N', 'HIS':'H', 'TRP':'W', 'PHE':'F', 'TYR':'Y', \
'ARG':'R', 'LYS':'K', 'SER':'S', 'THR':'T', 'MET':'M', 'ALA':'A', \
'GLY':'G', 'PRO':'P', 'CYS':'C'}
# one_letter["SER"] will now return "S"

three_letter = dict([[v,k] for k,v in one_letter.items()])
# three_letter["S"] will now return "SER"

# or, written out explicitly:
three_letter = {'V':'VAL', 'I':'ILE', 'L':'LEU', 'E':'GLU', 'Q':'GLN', \
'D':'ASP', 'N':'ASN', 'H':'HIS', 'W':'TRP', 'F':'PHE', 'Y':'TYR', \
'R':'ARG', 'K':'LYS', 'S':'SER', 'T':'THR', 'M':'MET', 'A':'ALA', \
'G':'GLY', 'P':'PRO', 'C':'CYS'}

Simple and Clever

Here's another way to accomplish this:

# The real convenience in there is that you can easily construct any
# kind of hash by just adding a matching list, and zipping.
aa1 = list("ACDEFGHIKLMNPQRSTVWY")
aa3 = "ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR".split()
aa123 = dict(zip(aa1,aa3))
aa321 = dict(zip(aa3,aa1))

# Then to extract a sequence, I tend to go for a construction like:
sequence = [ aa321[i.resn] for i in cmd.get_model(selection + " and n. ca").atom ]

Using BioPython

If you have BioPython you can use the following, which also includes many three-letter codes of modified amino acids:

from Bio.PDB import to_one_letter_code as one_letter

Using PyMOL

from pymol.exporting import _resn_to_aa as one_letter

Example Usage

# we used to have to do the following to get the amino acid name
from pymol import stored
stored.aa = ""
cmd.iterate("myselection", "stored.aa=resn")

# now we can just call
three_letter[cmd.get_fastastr("myselection").split('\n')[1]]
http://www.pymolwiki.org/index.php/Aa_codes
CC-MAIN-2014-42
en
refinedweb
In my continuing trend of trying to create a polyglot application, I’ve been working on introducing an asset pipeline into my small Clojure application. There’s a Clojure plugin, lein-haml-sass, that’ll do a good job, but it depends on a JRuby runtime during development and a number of Rubygems, namely haml and sass, to compile those targets.

Plugins

I got this working fine by adding the plugin to my project.clj file:

:plugins [[lein-haml-sass "0.2.7-SNAPSHOT"]]

And also adding some config, telling the library where the source is and where to compile the results to:

:sass {:src "resources/sass"
       :output-directory "resources/public/assets"
       :output-extension "css"}

Jolly good, except I wanted to follow Rails’ example and use fingerprinting of the file name. Over on the Rails site, there’s a detailed explanation of why this is a good idea. So I was satisfied with the solution thus far, but wanted to get a little more out of it.

Asset pipeline

Once again, a lot of this intelligence is wrapped up in the Rails source code and its convention-over-configuration influence. Rails uses sprockets under the hood, and once again it seems someone has thought about this already and created standalone-sprockets to mimic Rails without Rails. First, add the gem to a Gemfile. I’m using a branch as I found an issue and am waiting for it to get merged and released to Rubygems.

source ''
gem 'sprockets-standalone', github: 'robb1e/sprockets-standalone'

Then in your Rakefile:

require 'sprockets/standalone'

Sprockets::Standalone::RakeTask.new(:assets) do |task, sprockets|
  # what assets to compile
  task.assets = %w(application.css)
  # what directory those assets are in
  task.sources = %w(resources/sass)
  # where to copy the compiled files to
  task.output = File.expand_path('resources/public/assets', Dir.pwd)
  # also create a .gz version
  task.compress = true
  # create the fingerprint in the filename
  task.digest = true
end

Now when you run rake -T you’ll see the following tasks:

rake assets:clean    # Clean old assets
rake assets:clobber  # Remove all assets
rake assets:compile  # Compile assets

In my case, I have a file resources/sass/application.sass which compiles to resources/public/assets/application-FINGERPRINT.css and resources/public/assets/application-FINGERPRINT.css.gz when rake assets:compile is run. So far so good. This process also generates a JSON manifest file which creates a key/value table of original to compiled filenames, i.e. application.css is now application-b732b413fd9399adc58caf40c3356709.css. We need to ensure org.clojure/data.json is included in the dependencies in our project.clj:

:dependencies [[org.clojure/data.json "0.2.4"]]

I used the manifest in my layouts namespace, and start by requiring clojure.data.json:

(ns robb1e.views.layout
  (:require [clojure.data.json :as json]))

Now we can create a def which reads the manifest file:

(def assets ((json/read-str (slurp "resources/public/assets/manifest.json")) "assets"))

We can build upon that to retrieve the absolute HTTP path of that resource:

(defn asset [file]
  (str "/assets/" (assets file)))

We can even go one step further by creating an additional helper:

(defn application-css []
  (asset "application.css"))

Deploying to Heroku

Heroku recommends that an application check its assets into Git before pushing, and this will work here as well. It’s a little cumbersome, but it does work. There is also the option of copying the compiled assets to a CDN as part of a deployment process and including the CDN domain name in the code which concatenates the URI for the resource.
http://pivotallabs.com/author/rclutton/?tag=assets
CC-MAIN-2014-42
en
refinedweb
William F. Sharpe* STANCO 25 Professor of Finance, Emeritus, Stanford University Chairman, Financial Engines Inc. September, 2001 This paper describes a set of mean/variance procedures for setting targets for the risk characteristics of components of a pension fund portfolio and for monitoring the portfolio over time to detect significant deviations from those targets. Due to the correlations of the returns provided by the managers of a typical defined benefit pension fund it is not possible to simply characterize the risk of the portfolio as the sum of the risks of the individual components. However, expected returns can be so characterized. We show that the relationship between marginal risks and implied expected excess returns provides the economic rationale for the risk budgeting and monitoring systems being implemented by a number of pension funds. Next, we show how a fund's liabilities can be taken into account to make the analysis consistent with goals assumed in asset/liability studies. We also discuss the use of factor models as well as aggregation and disaggregation procedures. The paper concludes with a short discussion of practical issues that should be addressed when implementing a pension fund risk budgeting and monitoring system. Investment portfolios are composed of individual investment vehicles. A personal portfolio might be made up of equity and fixed income securities. An institutional portfolio might be made up of investments run by individual managers, with each such investment made up of equity and/or fixed income securities. Traditionally, each of the components of a portfolio is an asset, the future value of which cannot fall below zero. In this environment the total monetary value of the portfolio is typically considered an overall budget, to be allocated among investments. In a formal portfolio model, the decision variables are the proportions of total portfolio value allocated to the available investments. For example, in a portfolio optimization problem, the "budget constraint" is usually written as: Si Xi = 1 where Xi is the proportion of total value allocated to investment i. This approach does not work as well for portfolios that include investments that combine equal amounts of long and short positions. For example, a trading desk may choose to take a long position of $100 million in one set of securities and a short position of $100 million in another. The net investment is zero But this would also be true of a strategy involving long positions of $200 million and short positions of $200 million. For this type of portfolio, some other budgeting approach may be more desirable. One solution is to include a required margin, to be invested in a riskless security, and state gains and losses as percentages of that margin. This may suffice for a fund which uses few such investments, but is less than satisfactory for funds and institutions that utilize large short and long positions. In recent years hedge funds and financial institutions with multiple trading desks have developed and applied a different approach to this problem. Instead of (or in addition to) a dollar budget, they employ a risk budget. The motivation is straightforward. The goal of the organization is to achieve the most desirable risk/return combination. To do this it must take on risk in order to obtain expected return. One may think of the optimal set of investments as maximizing expected return for a given level of overall portfolio risk. The latter. 
Recently, managers of defined benefit pension funds have taken an interest in employing the techniques of risk budgeting and monitoring.. To some extent this has been motivated by a desire to better analyze positions in derivatives, hedge funds and other potentially zero-investment vehicles. But even a fund with traditional investment vehicles can achieve a greater understanding of its portfolio by analyzing the risk attributes of each of the components. Much of the practice of risk management in financial institutions is concerned with short-term variations in values. For example, a firm with a number of trading desks may be concerned with the effect of each desk on the overall risk of the firm. Risk management systems for such firms are designed to control the risk of each of the trading desks and to monitor each to attempt to identify practices that may be "out of control" -- adding more risk to the portfolio than was intended. Often such systems employ valuations made daily or even more frequently. Moreover, the horizon over which risk is calculated is typically measured in days rather than weeks, months or years. Pension funds differ from such institutions in a number of respects. The components in a pension fund portfolio are typically accounts managed by other investment firms or by groups within the organization. Such accounts may or may not be valued daily, but major performance reports are typically produced monthly, using end-of-month valuations. Horizons for risk and return projects are often measured in years if not decades. Finally, to identify and control an external manager who is taking excessive risk can be very difficult. Another key attribute of a defined benefit pension fund is its obligation to pay benefits in the future. This gives rise to a liability, so that its investment practices should, in principle, be viewed in terms of their impact on the difference between asset and liability values. It is possible for a pension fund to gather data on the individual securities held by its managers and to establish a risk measurement and monitoring system using daily data on a security-by-security level, and some funds have implemented systems designed to do this, thereby replicating the types of risk management tools used by financial institutions. However, such systems are complex, require a great deal of data, and are costly. For this reason, a pension fund manager may choose a less ambitious approach, relying instead on data about the values of manager accounts provided on a less frequent basis. This paper focuses on procedures that can be employed in such a system, with investment managers evaluated on the basis of returns on invested capital, as in standard performance reporting and analysis. Central to any risk budgeting and monitoring system is a set of estimates of the impacts of future events that can affect the value of the portfolio. In some systems, actual historic changes in values are used as estimates of possible future scenarios. In others a portfolio is subjected to a "stress test", for example, by assuming that future events might combine the worst experiences of the past at the same time even though they actually occurred at different times. While the direct use of historic data provides useful information, for longer-horizon projections it is more common to consider a broader range of possible future scenarios based on models of the return-generating process. 
A standard approach utilizes estimates of risks and correlations, with assumptions about the shapes of possible probability distributions. Often a factor model is employed in order to focus on the key sources of correlated risks. In this paper we assume that such an approach is utilized. Moreover, we assume that only a single set of estimates for such a model is needed, although the procedures described here can be adapted relatively easily for use with alternative estimates as part of stress testing. Most large pension funds take a two-stage approach when allocating funds among investments. The top stage involves a detailed study of the fund's allocation among major asset classes, usually (but not always) taking into account the liabilities associated with the fund's obligation to pay future pensions. For example, such an asset allocation or asset/liability study might be performed every 3 years. Its result is a set of asset allocation targets and a set of allowed ranges for investment in each asset class. Based on the asset allocation analysis, funds are allocated among investment managers, subject to the constraints that each asset exposure fall within the specified range. In many cases, the fund asset allocation analysis uses a standard one-period mean/variance approach, often coupled with Monte Carlo simulation to make long-term projections of the effects of each possible asset allocation on the fund's future funded status, required contributions, etc.. For most large funds asset allocation typically accounts for well over 90% of total risk, justifying the attention given it by management and the fund's investment board. The world of investments is very complex, with hundreds of thousands of possible investment vehicles. This makes it virtually impossible to estimate risks and correlations on a security-by-security basis. For this reason, almost all risk estimation systems employ some sort of factor model. A relatively small number of factors are identified and their risks and correlations estimated. It is assumed that the return on any individual investment component can be expressed as a function of one or more of these factors plus a residual return that is independent of the factors and of the residual returns on all other investments. This reduces the estimation problem to one requiring estimates of the factor risks and correlations plus, for each investment component, the estimation of the parameters in the function relating its return to the factors and the risk associated with its residual return. In some systems, factors are assumed to exhibit serial correlation over time, but a component's residual returns are generally assumed to be independent from period to period. One-period returns are generally assumed to be normally or log-normally distributed, with multi-period returns distributions determined by the characteristics of the underlying process. In practice, the factors used in a risk budgeting and monitoring system are likely to differ from the asset classes used for the fund's asset allocation studies. Ideally there should be a clear correspondence between the two. We do not consider such issues here. Instead we allow for the possibility that the factor model and risk and correlation estimates used in the risk budgeting system may differ from the model and estimates used in the asset/liability analyses. Following common practice in the pension fund industry, we assume that factor models used in the overall process are linear. 
More precisely, we assume that the return on any investment component can be expressed as a linear function of the returns on the factors (or asset classes) plus an independent residual. We are concerned with a pension fund that employs a number of investment managers. The fund has a fixed number of dollars in assets, A and a liability L, representing the present value of its accrued obligations. Ideally, both assets and liabilities should be measured in terms of market value. In practice, however, liabilities are often obtained as a by-product of actuarial analyses designed to determine appropriate current fund contributions. Such liabilities are typically not (nor were they intended to be) equal to the market value of any particular definition of accrued liabilities. Such actuarial liability values tend to respond relatively slowly to changes in the values of assets, interest rates, etc.. For this reason, analyses that compare market values of assets with actuarial values of liabilities give results that are affected less by the inclusion of liabilities than would be the case if the market values of liabilities were used. Ultimately, the decision variables for the fund's overall allocation are the amounts given to each of the investment managers (henceforth, managers). We assume that it is possible to determine each manager's factor exposures and residual risk. This could be performed using a top-down approach such as returns-based style analysis or a bottom-up procedure based on the manager's security holdings. The best approach depends on the horizon over which risk is to be estimated, the costs and accuracies of alternative models, and other considerations. Here we simply assume that such measures have been obtained. To fix ideas, we start with an extremely simple example. A fund has identified three asset classes -- cash, bonds and stocks. It plans to allocate its money ($100 million) among three managers, in accordance with the results of an asset allocation study. Each manager runs an index fund that tracks one of the classes exactly. Thus the manager selection problem and the asset allocation problem are one and the same. For now we ignore the fund's liabilities, concentrating solely on the risk and return of the fund's assets. To simplify the analysis, all calculations deal with excess returns, where the excess return on an asset is defined as its return minus the return on cash. Thus we are concerned with expected excess return (EER), standard deviation of excess return (SDER) and the correlations of excess returns. The assumptions used for the asset allocation study are these: Expected Excess Returns and Risks Correlations For computational ease, the standard deviations and correlations can be combined to produce a covariance matrix, which includes all the risk estimates. The covariance between two asset classes is simply their correlation times the product of their standard deviations. Including cash, we have: Covariances Given the estimated expected excess returns and covariances, it is possible to find a Markowitz-efficient portfolio for any given level of risk. By definition, such a portfolio provides the greatest possible expected excess return for the level of risk. 
For our example we assume that after considering the long-term effects of each of several such portfolios using Monte Carlo simulation, the fund selected the following:

Optimal Asset Allocation

This portfolio's expected excess return and standard deviation of excess return are:

Portfolio Expected Excess Return and Risk

In this case, each manager is given the specified amount for its asset class and each manager's return equals that of its asset class. Given this, we can compute the dollar expected excess return ($EER) for each manager and its proportion of the portfolio's dollar expected excess return. For example, the bond manager has 29.76% (.2976) of the total asset value of $100 million, or $29.76 million. The expected excess return on this part of the portfolio is 2% per year (.02), so that the manager is expected to add $595.2 thousand or $0.5952 million to the portfolio, over and above the amount that could have been earned by putting the money in cash. Similar computations show that the stock manager is expected to add $3.7698 million over and above the amount that could have been earned in cash. Since the cash manager cannot contribute any expected excess return, the total expected added value is $4.3651 million, which is consistent with the portfolio expected excess return obtained in the asset allocation study.

Dollar Expected Excess Returns

The final column in this table shows the proportion of total dollar expected excess return contributed by each manager, obtained by dividing its $EER by the portfolio's total $EER. In this case the bond manager is expected to contribute approximately 13.6% of the expected excess return and the stock manager to contribute 86.4%. Note that this differs significantly from the amount of assets allocated to each (approximately 29.8% and 62.8%, respectively). It would be straightforward to compute the risk of each manager's position, in percentage or in dollar terms. We could also compute a value-at-risk (VAR) amount for each manager, indicating, for example, an amount that the manager's value could fall below with 1 chance out of 20. But such measures are not sufficient to determine the effect of a manager's investments on the risk (or VAR) of the portfolio as a whole, since they do not take into account the correlations among manager returns. It would seem that the entire concept of risk budgeting is doomed to failure in cases involving risks that are correlated, since risks are not additive when they are correlated. Fortunately there is a measure of manager risk contribution that can serve as the foundation of a budgeting procedure, as we now show. Define the marginal risk of a manager as the change in total portfolio risk per unit change in the amount allocated to the manager when the amount is small. More precisely, it is the derivative of portfolio risk, expressed as variance (standard deviation squared), with respect to the amount allocated to the manager. The marginal risk of a manager (MRi) will equal twice its covariance with the portfolio as a whole, which is in turn a weighted average of the covariances of the manager's returns with those of the other managers, using the current portfolio proportions as weights:

MRi = dVp/dXi = 2 Cip = 2 Σj Xj Cij

where d denotes a partial derivative. Applying the formula to the case at hand gives the following. The stock manager has three times the marginal risk of the bond manager. Recall that the expected excess return of the stock manager is also three times that of the bond manager.
As we will see, this is not a coincidence. A Markowitz-efficient portfolio offers the greatest expected return for a given level of risk. To find a set of such portfolios it is usually more computationally efficient to solve the following problem:

Maximize: EU = EERp - Vp / rt
Subject to: Σi Xi = 1

where EERp and Vp are respectively the expected value and variance of portfolio excess return. For a given level of rt (risk tolerance), the solution will provide an efficient portfolio. By solving with different levels of rt, all efficient portfolios can be found, and the one with the most desirable risk and expected return selected based on the preferences of the decision-makers. In our example, the selected portfolio maximizes EU (expected utility) for a risk tolerance (rt) of 75. In practice, optimization problems are formulated with inequality constraints. The amounts invested in at least some asset classes are constrained to lie above a lower bound (e.g. 0) and/or below an upper bound (e.g. .25, or 25% of the portfolio). We discuss this later. For now, we assume that such constraints are absent or, if present, none is binding in the solution. Consider the marginal expected utility (MEU) of a position in a portfolio, defined as the rate of change of expected utility (EU) per unit change in the amount invested in that position. This will equal:

MEUi = dEU/dXi = dEERp/dXi - (dVp/dXi) / rt

But under the assumption that the expected return of an asset is the same regardless of the amount invested in it, the derivative of EERp with respect to Xi will equal EERi. Moreover, the derivative of Vp with respect to Xi is the value that we have defined as the manager's marginal risk, MRi. So we can write:

MEUi = EERi - MRi / rt

Imagine a portfolio in which the marginal expected utilities of two managers differ. Clearly the portfolio is not optimal. Why? Because one could take some money away from the manager with the lower MEU and give it to the manager with the higher MEU, thereby increasing the expected utility of the portfolio. It thus follows that a condition for portfolio optimality in the absence of binding constraints is that:

MEUi = k for all i, where k is a constant

This is the first-order condition for portfolio optimality. It provides the economic basis for risk management systems of the type we study here. To see why, consider cash, the risk-free asset. Its expected excess return (EERi) is zero, as is its risk (Vi) and marginal risk (MRi). Thus its MEUi value will equal zero. But this requires that k=0. Thus for each manager (i):

MEUi = EERi - MRi / rt = 0

so that:

EERi = MRi / rt

The previous equation is central to the motivation for a risk management system based on mean/variance analysis. One way to solve an optimization problem in the absence of inequality constraints is to find the set of proportions (Xi's) summing to one for which each manager's marginal risk (MRi) is equal to its expected return times the fund's risk tolerance. In other words, solve the set of simultaneous equations that will make MRi = rt * EERi for every asset i. This process is termed portfolio optimization. For our purposes, it is more instructive to reverse this process. Assume that a portfolio is optimal and that the covariances of its components are known, as is the risk tolerance of the fund. Then one can find the expected excess returns for the components using the first-order conditions EERi = MRi / rt for every asset i. This is generally termed reverse optimization.
It is also sometimes described as finding the implied views (of expected excess returns) for a portfolio. Assume that the covariance matrix for a fund's managers is known. To compute implied expected excess returns one only needs an estimate of the fund's risk tolerance (rt). But this can be found if one manager's expected excess return is known. Recall our example. Using only the covariance matrix, we computed the marginal risks for the managers. To find the implied expected excess returns we need only know the expected excess return of one component or combination of components. For example, the marginal risk of the stock manager was 450. If the expected excess return on stocks is 6%, then rt = 450/6, or 75, and the implied expected excess returns are those shown below. Not surprisingly, the implied values of the asset EERs are identical to those that were used in the optimization process. Given expected excess returns, we can compute dollar expected excess returns as before, as well as the proportions of total dollar expected excess return. However, the latter will be the same regardless of risk tolerance, and can be computed solely from the covariance matrix and portfolio composition. Defining P$EERi as the proportion of dollar expected excess return provided by manager i:

P$EERi = Xi EERi / Σi (Xi EERi) = (Xi MRi / rt) / Σi (Xi MRi / rt) = Xi MRi / Σi (Xi MRi)

This relationship both explains and justifies the computations that lie behind mean/variance risk budgeting and management. Marginal risks act as surrogates for expected excess returns. An overall fund should be managed so that the marginal risk of each of its components is commensurate with the expected excess return of that component. Given expectations about returns, a risk budget can be established, with each component allowed to have a marginal risk (MRi) and fund allocation (Xi) that give it an appropriate contribution (P$EERi). Recall from our earlier discussion that the marginal risk of a portfolio component will equal twice its covariance with the portfolio. From the properties of covariance we know that:

Σi Xi MRi = Σi Xi (2 Cip) = 2 Σi Xi Cip = 2 Vp

Thus the sum of the weighted marginal risks of the portfolio components will equal twice the variance of the overall portfolio. This leads some to define the risk contribution of a component as half its marginal risk (that is, its covariance with the portfolio) so that a weighted average of these values will equal the variance of the overall portfolio. Clearly, this will have no effect on the P$EERi values. However, it sometimes leads to an incorrect view that it is possible to decompose portfolio risk into a set of additive components and to incorrect statements of the form "this manager contributed 15% to the total risk of the portfolio". There is a case in which computations based on marginal risks do provide an additive decomposition of total portfolio risk. If all component returns are independent, the marginal risk of manager i will equal 2 Xi Vi and the product Xi MRi will equal 2 Xi² Vi. Summing these values over managers will give an amount equal to twice the portfolio variance. In this special case, defining a manager's risk contribution as half its marginal risk thus makes the product Xi MRi equal precisely its contribution to total portfolio risk.
We will see that the assumption of independence may be appropriate for the portion of a manager's return that is not related to the factors included in the underlying factor model and that this interpretation of risk decomposition can be applied to that portion of overall portfolio variance. Much of the literature on risk budgeting and monitoring focuses on such non-factor risk and thus is justified in claiming that the procedures provide an allocation of portfolio risk. However, this is not applicable for the correlated components of a pension fund, which generate most of its risk. Humans best process information when relationships are linear. Expected returns are linear, and the expected return of a portfolio can be decomposed into portions provided by each of the portfolio components. This is not generally the case for risk. For this reason, the computations we have described, which are utilized in many pension fund mean/variance risk budgeting and monitoring systems are best viewed in terms of implied expected excess return budgets and deviations therefrom. In practice pension fund risk budgeting and monitoring involves three related but somewhat separate phases. In the first phase, the fund selects a policy portfolio in which dollar amounts are allocated to managers. As indicated earlier, this often involves two stages: (1) an asset allocation or asset/liability study using optimization analysis that allocates funds among asset classes, and (2) the subsequent allocation of funds among managers using procedures which may be quantitative, qualitative or a combination of both. Rarely is the policy portfolio determined entirely by mean/variance optimization, and even the optimization analysis utilized as part of the process often involves binding constraints so that the first order conditions we have described will not hold strictly for every component. In any event, we term this entire process the policy phase. Part of the process will use expected returns and covariances, which we term the policy expected returns and policy risks and correlations. It will also use a (possibly implicit) factor model which we will term the policy factor model. The output of this phase is the policy portfolio, which allocates specific amounts of capital to each of the components of the fund. The second phase is the establishment of the fund's risk budget. This makes the (possibly heroic) assumption that the policy portfolio is optimal in a mean/variance sense at the time of its designation. Ideally this would be interpreted using the policy factor model and policy expected returns and covariances. However, the very nature of the policy phase may make this impossible, since there are typically insufficient estimates of risks and returns, and in most cases the policy portfolio is not created directly from a formal unconstrained optimization. In practice, therefore, a more complete factor model and set of risks and correlations, typically provided by an outside vendor, is used to perform a reverse optimization based on the policy portfolio as of the date of its formation. This yields a series of implied expected excess returns and proportions of the portfolio's dollar expected excess return attributable to each component. Such values represents the components' risk budgets (RB). From our previous formulas this can be stated most directly as: RBi = XiCip / Vp for each manager i. 
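The arithmetic behind these policy-phase risk budgets is compact enough to sketch in code. The sketch below is illustrative only: the covariance entries are placeholder values chosen so that the example reproduces the marginal risks of 150 and 450 quoted earlier in the text (they are not the paper's own estimates, which are not reproduced here), and the calibration uses the paper's assumed 6% expected excess return on stocks.

// Illustrative sketch of the reverse-optimization and risk budget arithmetic.
// Weights follow the example in the text; the covariance matrix is a placeholder
// chosen only to be consistent with the marginal risks quoted there.
using System;

class RiskBudgetSketch
{
    static void Main()
    {
        string[] names = { "Cash", "Bonds", "Stocks" };
        double[] X = { 0.0741, 0.2976, 0.6283 };          // policy weights
        double[,] C = {                                    // covariances, in (% per year)^2
            { 0.0,  0.0,   0.0   },
            { 0.0, 49.3,  96.0   },
            { 0.0, 96.0, 312.6   }
        };
        int n = X.Length;

        // Covariance of each component with the portfolio: Cip = sum_j Xj * Cij
        double[] Cip = new double[n];
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
                Cip[i] += X[j] * C[i, j];

        // Portfolio variance: Vp = sum_i Xi * Cip
        double Vp = 0.0;
        for (int i = 0; i < n; i++) Vp += X[i] * Cip[i];

        // Calibrate risk tolerance from one known expected excess return (stocks, 6%)
        double rt = (2.0 * Cip[2]) / 6.0;

        for (int i = 0; i < n; i++)
        {
            double MR  = 2.0 * Cip[i];        // marginal risk
            double EER = MR / rt;             // implied expected excess return (%)
            double RB  = X[i] * Cip[i] / Vp;  // risk budget share; shares sum to one
            Console.WriteLine($"{names[i],-7} MR={MR,6:F1}  implied EER={EER:F2}%  RB={RB:P1}");
        }
    }
}

With these inputs the sketch reproduces the figures in the text: marginal risks of roughly 150 and 450, implied expected excess returns of 2% and 6%, and risk budget shares of about 13.6% and 86.4% for the bond and stock managers respectively.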
Even if the policy portfolio is implemented precisely when the policy phase is completed, market movements, manager actions and changing risks and correlations will lead to changes in many or all aspects of the situation. This leads to the third phase. Current estimates of risks and correlations, typically provided by the outside vendor, along with manager positions and/or return histories, are used to compute a new set of values for investment proportions, covariances and portfolio variance. This provides the current set of risk proportions (RP). Letting primes denote current values, we have:

RPi = Xi' Cip' / Vp'

The monitoring phase involves comparisons of the current risk proportions with the risk budgets. Significant disparities lead to evaluation, analysis and in some cases action. Not surprisingly, this process can easily be misunderstood and misused. The risk budget figures are actually surrogates for proportions of expected value added (over cash) at the time of the policy analysis. The risk proportion figures are surrogates for the proportions of implied expected value added, based on the current situation. The presumption is that large disparities need to be justified by changes in estimates of the abilities of the manager to add value. Lacking this, some sort of action should be taken. Note, however, that a change in a manager's RP value may be due to events beyond his or her control. Moreover, there are many ways to change a manager's RP value if such a change is needed. The amount invested (Xi) may be adjusted, as may the covariance of the manager's return with that of the portfolio. The covariance may, in turn, be changed by altering the manager's investment strategy or by changing the allocation of funds among other managers and/or the strategies of other managers. In any event, the comparison of RP values with RB values provides a discipline for monitoring an overall portfolio to insure that it remains reasonably consistent with original or modified estimates of the abilities of the managers. While the actions to be taken in the event of significant disparities are not immediately obvious, it is important to know when actions of some sort are desirable. The diagram below shows the three phases. The policy phase is performed periodically, followed by the risk budgeting phase. The monitoring phase is then performed frequently, until the process begins anew with another policy phase. Thus far we have ignored the presence of liabilities, assuming that all three phases focus on the risk and return of the fund's assets. However, this may not be appropriate for a fund designed to discharge liabilities. Fortunately, the procedures we have described can be adapted to take liabilities into account. We continue to utilize a one-period analysis. Current asset and liability values are A0 and L0, respectively. At the end of the period the values will be A1 and L1, neither of which is known with certainty at present. We define the fund's surplus as assets minus liabilities. Thus S0 = A0 - L0 and S1 = A1 - L1. We assume that the fund is concerned with the risk and return of its future surplus, expressed as a portion of current assets, that is S1/A0. Equivalently:

S1 / A0 = (A1 / A0) - (L0 / A0) * (L1 / L0)

The first parenthesized expression equals 1 plus the return on assets, while the last parenthesized expression can be interpreted as 1 plus the return on liabilities.
Defining the current ratio of liabilities to assets (L0/A0) as the fund's debt ratio (d), S1/A0 may be written as:

(1 - d) + RA - d*RL

The parenthesized expression is a constant and hence cannot be affected by the fund's investment policy. We thus consider only the difference between the asset return (RA) and the liability return multiplied by the debt ratio (d*RL). We are now ready to write the expected utility as a function of the expected value and risk of (RA - d*RL). Let rt be the fund's risk tolerance for surplus risk. Then:

EU = E(RA) - E(d*RL) - V(RA - d*RL) / rt

Expanding the variance term gives:

EU = E(RA) - E(d*RL) - V(RA) / rt + 2*d*CAL / rt - d²*VL / rt

Since the decision variables are the asset investments, we can ignore terms that are not affected by them. Neither the expected liability return nor the variance of the liability return is affected by investment decisions. Hence for optimization and reverse optimization purposes we can define expected utility as:

EU = E(RA) - V(RA) / rt + 2*d*CAL / rt

Note that this differs from expected utility in the case of an asset-only optimization only by the addition of the final term, which includes the covariance of the asset return with the liabilities. Moreover, covariances are additive, so that the covariance of the asset portfolio with the liabilities will equal a value-weighted average of the covariances of the components with the liabilities. This implies that the marginal expected utility of component i will equal:

MEUi = EERi - MRi / rt + 2*d*CiL / rt

where CiL is the covariance of the return of asset i with that of the liabilities. We can now write the first order condition for optimality in an asset/liability analysis as:

MEUi = EERi - MRi / rt + 2*d*CiL / rt = k for each asset i

But the risk-free asset (here, cash) has zero values for all three components. Hence, as before, k=0, so that:

EERi - MRi / rt + 2*d*CiL / rt = 0

and:

EERi = MRi / rt - 2*d*CiL / rt

To compute implied expected excess returns we need only subtract a liability-related term (2*d*CiL) from an asset's marginal risk, then divide by risk tolerance. Alternatively, recalling that MRi = 2*Cip, we can write:

EERi = (2 / rt) * (Cip - d*CiL)

All the procedures described in the asset-only case can be adapted in straightforward ways to incorporate liabilities. For example, the risk budgets and risk positions can be determined using the following formulas:

RBi = [ Xi * (Cip - d*CiL) ] / [ Vp - d*CpL ]
RPi = [ Xi' * (Cip' - d'*CiL') ] / [ Vp' - d'*CpL' ]

where, as before, the variables without primes reflect values at the time of the policy analysis and the variables with primes reflect values at the current time. Due to the properties of variances and covariances, the values of RBi will sum to one, as will the values of RPi. As indicated earlier, most risk estimation procedures employ a factor model to provide more robust predictions. Generically, such a model has the form:

Ri = bi1 F1 + bi2 F2 + ... + bin Fn + ei

where bi1, bi2, ..., bin are the sensitivities of Ri to factors F1, F2, ..., Fn, respectively, and ei is component i's residual return. Each ei is assumed to be independent of each of the factors and of each of the other residual returns. A risk model of this type requires estimates of the risks (standard deviations) of each of the factors and of each of the residual returns. It also requires estimates of the correlations of the factors with one another. Note that in this model each return is a linear function of the factors.
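Written compactly, and using matrix notation that the paper itself does not introduce (Φ below denotes the covariance matrix of the factors, bi the vector of component i's factor sensitivities, and vi the variance of ei), the model implies a covariance structure of the form:

\[
R_i = \sum_{k=1}^{n} b_{ik} F_k + e_i,
\qquad
\operatorname{cov}(R_i, R_j) = b_i^{\top} \Phi\, b_j + \delta_{ij} v_i
\]

where δij equals one when i = j and zero otherwise. Every covariance needed for the marginal-risk, risk budget and risk proportion calculations can therefore be assembled from the factor covariance matrix, the components' exposure vectors and their residual variances.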
We may thus aggregate, using the proportions held in the components to obtain the portfolio's return:

Rp = bp1 F1 + bp2 F2 + ... + bpn Fn + ep

where each value of bp is the value-weighted average of the corresponding bi values and ep is the value-weighted average of the ei values. It is convenient to break each return into a factor-related component and a residual component. Defining RFi as the sum of the first n terms on the right-hand side of the equation for Ri, we can write:

Ri = RFi + ei

Similarly, for the portfolio:

Rp = RFp + ep

Now consider the covariance of component i with the portfolio. By the properties of covariance, it will equal:

Cip = cov(RFi + ei, RFp + ep) = cov(RFi, RFp) + cov(RFi, ep) + cov(ei, RFp) + cov(ei, ep)

By the assumptions of the factor model, the second and third covariances are zero. Hence:

Cip = cov(RFi, RFp) + cov(ei, ep)

Recall that:

ep = Σi Xi ei

Since the residual returns are assumed to be uncorrelated with one another, the covariance of ei with ep is due to only one term. Let vi be the variance of ei (that is, component i's residual variance). Then:

cov(ei, ep) = Xi vi

and:

Cip = cov(RFi, RFp) + Xi vi

Substituting this expression in the formula for the implied excess return in the presence of liabilities we have:

EERi = (2 / rt) * (cov(RFi, RFp) + Xi vi - d*CiL)

This can be regrouped into two parts -- one that would be applicable were there no residual risk, and one that results from such risk:

EERi = (2 / rt) * [ cov(RFi, RFp) - d*CiL ] + (2 / rt) * Xi vi

The final term is often termed the manager's alpha value -- that is, the difference between overall expected return and that due to the manager's exposures to the factors (and here, covariance with the fund's liability). Thus we have:

EERi = Factor-related EERi + ai

where:

Factor-related EERi = (2 / rt) * (cov(RFi, RFp) - d*CiL)

and:

ai = (2 / rt) * Xi vi

Just as implied expected returns can be decomposed into factor-related and residual components, so too can risk budgets and risk proportions. For example, a manager could be given a budget for factor-related contributions to risk and a separate budget for residual risk. Many systems concentrate on the latter, which has substantial advantages since, as indicated earlier, the contributions to portfolio residual risk do in fact add to give the total portfolio residual variance. However, they cover only a small part of the total risk of a typical pension fund. Expected excess returns are additive. Thus the expected excess return for a group of managers will equal a weighted average of their expected excess returns, using the relative values invested as weights. Covariances are also additive. Thus the marginal risk of a group of managers can be computed by weighting their marginal risks by relative values invested. This makes it possible to aggregate risk budgets and risk proportions in any desired manner. For example, a fund may be organized in levels, with each level's risk budget allocated to managers or securities in the level below it, and so on. Thus there could be a risk budget for equities, with sub-budgets for domestic equities and international equities. Within each of these budgets there may be sub-budgets for individual managers, and so on. The following tables provide an example for a large pension fund. These results were obtained using an asset-class factor model with historic risk and correlation estimates. The relationships of the managers to the factors were found using returns-based style analysis.
Residual variances are based on out-of-sample deviations from benchmarks based on previous returns-based style analyses. After the managers were analyzed, they were combined into groups based on the fund's standard classifications. Implied expected excess returns were calibrated so that a passive domestic equity portfolio would have the expected excess return used in the fund's most recent asset allocation study. Although the fund does take liabilities into account when choosing its asset allocation, the figures shown here are based solely on asset risks and correlations.

Implied Expected Excess Returns (EER)

Note that the implied alpha values are all small, including four that are less than 1/2 of 1 basis point and thus shown as 0.00. This is not unusual for funds with many managers. A high degree of diversification is consistent with relatively low expectations concerning managers' abilities to add value. As we have shown, the implied expected excess returns can be combined with the amounts allocated to the managers to determine the implied expected values added over a cash investment, which we have termed the dollar expected excess returns ($EER). These can be divided by the total $EER for the portfolio to show the relative contribution to excess expected return for each manager or aggregate thereof. The final three columns of the following table show the results for the fund in question, broken into the factor-related component and the residual-related component (alpha).

Percents of Implied Portfolio Dollar Expected Excess Return

In this case, by far the largest part of the implied added value (98.83%) is attributable to the managers' factor exposures. This has a natural interpretation as the proportion of portfolio variance explained by factor risks and correlations plus the portfolio's exposures to those factors. This follows from the fact that the alpha values are derived from contributions to residual variance, each of which equals Xi² vi, making the sum equal to the portfolio's residual variance. Reports such as this can be valuable when allocating pension fund staff resources. Note, for example, that the fund has allocated slightly less than 45% of its money to domestic equity managers, but this analysis indicates that such managers should be expected to provide over 60% of the added value over investing the entire fund in cash. This might lead to the conclusion that 60% of staff resources might be assigned to this part of the portfolio, instead of 45%. We have chosen to present the results of this analysis in terms of implied dollar expected excess returns. However, in most risk budgeting systems the terms "risk budget" and "risk contribution" would typically be used instead. For example, assume that the prior report was produced using the fund's policy portfolio. Then the percentages in the final column would constitute the "risk budgets" for the aggregate groups. At subsequent reporting periods the same type of analysis could be performed, giving a new set of results, which could be compared with those obtained at the time the policy phase was completed. The resulting report would have the following appearance, with the final two columns filled in based on the current situation. In many systems each part of the portfolio is given both a risk budget and an accompanying set of ranges. Often the latter are broken into a "green zone" (acceptable) and a "red zone" (unacceptable), with a "yellow zone" (watch) between.
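The comparison itself is simple to automate. The sketch below is illustrative only: the zone boundaries (a 25% relative deviation from budget for the yellow zone and 50% for the red zone) and the group figures are placeholder values, since the paper does not prescribe specific ranges.

// Illustrative zone check comparing current risk proportions (RP)
// against policy risk budgets (RB). Thresholds and figures are placeholders.
using System;

class RiskMonitor
{
    static string Zone(double rb, double rp, double yellow = 0.25, double red = 0.50)
    {
        double deviation = Math.Abs(rp - rb) / rb;   // relative deviation from budget
        if (deviation >= red) return "RED (unacceptable)";
        if (deviation >= yellow) return "YELLOW (watch)";
        return "GREEN (acceptable)";
    }

    static void Main()
    {
        // (group, risk budget, current risk proportion) -- hypothetical numbers
        var groups = new (string Name, double RB, double RP)[]
        {
            ("Domestic equity", 0.62, 0.66),
            ("International equity", 0.20, 0.31),
            ("Fixed income", 0.18, 0.09),
        };

        foreach (var g in groups)
            Console.WriteLine($"{g.Name,-22} RB={g.RB:P1}  RP={g.RP:P1}  {Zone(g.RB, g.RP)}");
    }
}

In practice the thresholds, and whether deviations are measured in relative or absolute terms, would themselves be part of the risk budgeting policy.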
While risk budgeting and monitoring systems can prove very useful in a pension fund context, some issues associated with their implementation need to be addressed. As we have shown, the central principle behind the use of risk budgets based on mean/variance analysis is the assumption that a particular portfolio is optimal in the sense of Markowitz, with no binding inequality constraints. This may be inconsistent with the procedures used to allocate funds among managers at the time of a policy study (or at any time thereafter). It is true that asset class allocations are typically made with the assistance of optimization analysis. However, the formal optimization procedure often includes bounds on asset allocations, some of which are binding in the solution. Moreover, the results of the optimization study provide guidance only on allocation across broad asset classes and the study typically assumes that all funds are invested in pure, zero-cost index funds, each of which tracks a single asset class precisely. Actual implementations involve managers that engage in active management and often provide exposures to multiple asset classes. Since the eventual allocation of funds across managers is made using a variety of procedures, some quantitative, others qualitative, the resulting allocation may not be completely optimal in mean/variance terms. Potential problems may also arise when the asset allocation model uses one set of factors (the asset class returns), while the risk budgeting and monitoring system uses another. Even if the policy portfolio is optimal using the policy factor model, expected returns and risk and correlation assumptions, it may not be optimal using the risk budgeting system's factor model, manager factor exposures and risk and correlation estimates. Yet this may be assumed when the risk budgets are set. Finally, there is the problem of choosing an appropriate action when a risk proportion (RP) diverges unacceptably from a previously set risk budget (RB). Consider a case in which the risk proportion exceeds the risk budget. Should money be taken away from the manager or should the manager be asked to reduce his or her contribution to portfolio risk? If the latter, what actions should the manager take? One alternative is to reduce residual risk, but this may not be sufficient, and may lower the manager's chance of superior performance. The manager could be asked to change exposures to the underlying factors, but such changes could force a manager to move from his or her preferred "style" or investment habitat, with similar ill effects on overall performance. Some of these problems are mitigated if the risk budgeting and monitoring system deals only with residual (non-factor) risks. But, for a typical pension fund such risks constitute a small part of overall portfolio risk, which is consistent with low implied expectations for added return (alpha). To provide a comprehensive view of a portfolio it is important to analyze both the small (uncorrelated) part of its risk and the large (correlated) part. We have shown that a great many results can be obtained by combining a risk model with attributes of a fund's investments. A portfolio based on a policy study and its implementation can be used to set targets, or risk budgets. These can be used to allocate effort for manager oversight, selection, and monitoring. 
Subsequently, actual portfolios can be analyzed to determine the extent to which risk computations based on current holdings differ from those obtained using policy holdings. Significant differences can then be used to initiate changes, as needed. At this date, the use of risk budgeting and monitoring by defined benefit pension funds is limited. As more funds implement such procedures we will find the strengths and weaknesses in this context and deal with issues associated with their implementation . There is no doubt that risk budgeting and monitoring systems can produce large amounts of data. In time we will learn how to insure that they produce the most useful information. * There is an extensive literature on Risk Budgeting and Monitoring Systems. An excellent source is Risk Budgeting, A New Approach to Investing, edited by Leslie Rahl, (Risk Books, 2000). In describing and interpreting some of the procedures used in risk budgeting systems I have drawn on a great deal of work done by others as well as some of my earlier results. The idea of computing implied views of expected excess returns based on portfolio composition and covariances can be found in William F. Sharpe, "Imputing Expected Returns From Portfolio Composition," Journal of Financial and Quantitative Analysis, June 1974. The relationship between an asset's expected return and its covariance with a set of liabilities is described in William F. Sharpe and Lawrence G. Tint, "Liabilities -- A New Approach,", Journal of Portfolio Management, Winter 1990, pp. 5-10.
http://web.stanford.edu/~wfsharpe/art/q2001/q2001.htm
CC-MAIN-2014-42
en
refinedweb
If you’ve been writing Windows apps for more than a few weeks, you’ll probably have read an article or heard someone mention "MVVM". MVVM is one of those things that is easy to file away in the back of your mind as something you probably should know about, and therefore something you might feel a bit guilty about not knowing or doing more about. And the more you think you should know about it, the more you think it’s probably too late to do anything about it, and your coding soul is damned forever, so there’s no point even starting now. Well, I’ve some good news for you. MVVM is not as bad as you think. Sure, if you start Binging around for MVVM web hits you’ll end up reading in-depth (and frankly scary) articles about or by Martin Fowler and wondering how you managed to write any code without knowing any of this stuff. And if you overhear any coding conversations in Redmond Starbucks you might quickly get very lost and feel inadequate, but, hey, that’s just normal and you shouldn’t really be eavesdropping anyway, should you? So today, I’m going to take a stab at explaining what MVVM is, hopefully well enough that you can bluff your way through a conversation at a party frequented by Microsoft developers (and I’m sure those are real; just because I’ve never been to one doesn’t mean they don’t exist, and I’m sure they’re a complete riot).

Design Patterns

If you have been working on other platforms for a while, you might have heard of the subject of design patterns, and MVC in particular. Design patterns provide a 'big picture' as to how you might structure your app. It’s nothing to do with a particular programming language; it’s all about how you can divide up the work into components, with the overall aim of making your code easier to read, test, re-use and maintain. Think of it as learning good coding habits. MVC is one of these patterns, and stands for Model, View and Controller. It’s about keeping your program's data, UI and logic in separate chunks. If you have been developing for iOS using Xcode, you’ll have bumped into the MVC design model – the way the UIViewControllers work is pure MVC – but the problem is that it’s so easy to wander completely off the MVC path and lose all the benefits; you might not even have realized you are doing it, especially in smaller apps. Here’s a rather nice article on using Xcode and the MVC design pattern properly. Now, here’s the thing: these design patterns are all just guidelines. There are no compiler warnings when you do them "wrong". If you want to use them, you have to bring the discipline to your projects yourself. In many cases, perhaps smaller apps for mobile devices written by one person, there’s not a strong case to be made for enforcing this design. However, as apps get larger, and more and more people besides you get involved, and more testing is required... well, soon it becomes very apparent that there are some really good reasons to dig deep and find the inner strength to use a design pattern. Some developers might say that as your career progresses from noob to experienced developer, you go through several stages: First, you’re just glad that your app mostly works and doesn’t cause the computer to catch fire. Then you start to write your code in such a way that you can actually read it a few weeks later, and don’t feel too bad if someone else reads it. Then you realize you need to be using a Source Code Management system like Git (see Visual Studio Online) so you can sensibly back up your code, manage releases and work with other developers.
And then, after that, you’ll start to wonder if design frameworks can help you when writing larger projects, so you don’t go insane after you’ve written 20,000 lines of code and it’s all in one class. Finally, you want to make beautiful code. Code that is clear, and legible and something that you can re-use and test and generally rely on. A mix of poetry and engineering, and yes, you really should get out more. Me? I’m still at the stage when I’m relieved when my phone doesn’t explode, but a man can dream.. MVVM MVVM is a very difficult to pronounce palindromic acronym, which doesn’t help its case. Mumvveemmm? It stands for Model, View, View Model. Yes, it’s similar to MVC but it takes advantage of some C#/XAML features to help instill some discipline to keep you on the right track. Let’s look at the components separately, to get a better understanding of how it works. 1. View The “View” is the easiest part to understand: it’s the part you look at. In our world, this is the XAML that defines the various controls that make up the app. When creating non-MVVM apps with buttons and gridviews and all the other XAML controls, you may have linked the controls (in the sense of doing something when they are clicked, or writing code to populate them) directly to handlers in the code behind page, just as in iOS when you would have used the Action event to trigger a method when a button was tapped, or set up a Delegate to popular a UITableView. But then something bad happens: the code behind pages become huge and before you know it, gigantic and the heart of your app is trapped in these classes which are designed mostly for servicing a user interface. This direct linkage is not what MVVM is about. In fact, if your XAML controls all link into the C# code-behind page, you’re not doing MVVM at all. To do MVVM well you need to minimize the amount of code you have in the code behind. If you don’t, it’s too easy to end up with your entire app stuffed into the C# code behind, responding directly to Click events. This is OK (probably.. possibly.. maybe?) for smaller apps, but it definitely isn’t OK for larger projects. When coding in the MVVM pattern, you need to create an abstraction between your buttons and other controls and the main app that makes decisions. This might seem odd: why pull things apart in this way? Won’t it make your application harder to understand? The argument in favor of MVVM is actually based in pragmatism: you know that after you’ve been working on an app for a while, you lose the idealized purity of the vision you had when you started, and you end up with lots of ‘special cases’ and extra tests, and basically things can get messy. In a project which is larger in scope than a typical app, or which has multiple people working on it, it can get very difficult to try and grok what the code is actually doing. MVVM tries to mitigate this, by cleanly separating the UI side and the program logic side. There is some extra expense involved, but in the long term, it can make projects considerably more manageable. 2. View Model So, how do you link your XAML controls with your code? That’s where the View Model, and our old friend data binding comes in. We’ve already seen how we can link data to, say, a ListView. Essentially, you set up the XAML control to be bound to a specific source (an array or list – often an ObservableCollection in fact) and then you can just let it be: the XAML control will display the contents of the array and it can also update the array if the information changes. 
That’s the power of data binding: you get to write less code. We’ve also seen some slightly more advanced binding, which uses data convertors to take a data source and make that control the color or visibility of a button. XAML is nothing if not flexible. Therefore, for each of your XAML pages, you should create a new class containing the View Model. Rather than the XAML pages’ code-behind files, it’s these pages which will handle the actions from the user interface, and provide the data to any controls that need it. Here's a before and after for what we hope to achieve: BEFORE AFTER By doing this, we immediately split the UI from the code that services the UI. The downside is that we now need to do a little (ahem) work in order to create the plumbing required to link the XAML side with the View Model side, and this work can be a little bit tricky the first time you try it. In fact, it’s this work which can be the most off-putting part, because at this stage is often seems like a completely unnecessary way of making things difficult for yourself. However, stick with it. It's not quite as bad as you might think. We’ll look at one approach in a minute, because it’s now time for… 3. Model The Model component is perhaps the easiest component to understand, because it’s often simply the data structures used by your app. The model could be nothing more than a class or two containing various fields: customer name, average income, likelihood they would want to be your friend, minimum distance in current court-ordered restraining order and so on. If your program is more complicated that simply storing, displaying and editing data, you could also keep your ‘logic’ here. This would be the methods which process the data in some way. Practical advice on creating an MVVM based Now for the source code! When writing an app based on the MVVM model, you can just start as you normally would and design your model component, and knock up a few pages in XAML or even Blend. However, try to avoid adding any click handlers. For each XAML page you create, create a new class to contain the View Model. You will need to instantiate this class in your XAML’s code behind. For example, if you create a blank C# Windows Store App project you will be given a MainPage.xaml and MainPage.xaml.cs files. You should then create a MainPageViewModel.cs class, and instantiate it inside MainPage.cs, like this: namespace MyMVVMApp { public sealed partial class MainPage : Page { private MainPageViewModel mainPageViewModel = new MainPageViewModel(); public MainPageViewModel MainPageViewModel { get { return this.mainPageViewModel; } } public MainPage() this.InitializeComponent(); } } Now to wire up some data, and add an event handler. Accessing Data in the View Model Let’s create a variable in the View Model, which will be data bound to a TextBlock in the XAML. We could define the type of the variable by accessing our classes in Model, but for now, let’s just use a simple string. First of all though, we need to make sure the XAML in MainPage.xaml knows where to look for data bound sources. Add the following line to the <Page> section right at the top of the file: DataContext="{Binding MainPageViewModel, RelativeSource={RelativeSource Self}}" Now we can define the TextBox control in the same XAML file too: <TextBox Text="{Binding stringData, Mode=TwoWay}" VerticalAlignment="Top" Margin="20,20,10,0" /> Notice the data binding to the variable called stringData is TwoWay in this case. 
This means that not only will the TextBox take its value from our bound variable, but it will also change the variable if the user edits it in the TextBox. Note: You must remember to also wire up the INotifyPropertyChanged events. If you don't, the app will work once as the View requests the initial state to display, but when you update the databound variables the UI won't get refreshed. See this blog posting on using INotifyPropertyChanged. It's not that hard, just another thing to do in your ViewModel. We must of course create the stringData variable, and we do that in MainPageViewModel.cs, like this: public class MainPageViewModel public string data = "Testing one two three"; public string stringData get { return data; } set data = value; … And if you build and run this, you’ll see that the TextBox displays the text you would expect it to display. Editing the contents of the TextBox changes the variable, which you can check if you add breakpoint somewhere and use the debugger. But let’s add a button to make that easier, and also to demonstrate how to data bind a button event, because data binding an event seems a crazy thing to be able to do. When adding Button controls to XAML, you’ll have used the Click=”” event handler, I’m sure. However, our goal is to avoid linking the XAML directly to the code behind page, so let’s try something else. Declare your button in the MainPage.xaml file like this: <Button Content="Button" Command="{Binding ClickButton}" /> Interesting, huh? We are using something called Command to bind an event handler. How is this going to work? Can you simply place a method called ClickOne() into the MainPageViewModel.cs file? Sadly not – there’s a little more to it. Let’s cheat a little, and use some already written code contained in RelayCommand.cs class. This class will be created if you create a new C# project which uses Navigation (so not the Blank project). This class is a useful lump of code which will simplify redirecting events to your View Model class. Use it by adding the following code to your MainPageViewModel.cs file: public RelayCommand ClickButton { get; private set; } public MainPageViewModel() ClickButton = new RelayCommand(() => Click()); private void Click() System.Diagnostics.Debug.WriteLine(stringData); We end up with a new event handler method, Click(), which just for kicks we ask to output the current contents of the stringData variable. This proves that not only is the event handler wired up properly, but also that the stringData is bound in TwoWay mode. Hurrah! We’ve successfully separated the XAML and its code behind page, from our View Model page. As a result, we can hand off the XAML to our designer buddy to work on, while we write unit tests around the View Model code. We could even write an entirely different XAML View, perhaps for Windows Phone, and keep the same View Model class without changing a line of code. Good work, us! Conclusion Although we’ve done some clever stuff there, you might think that working with an MVVM model is just not worthwhile for your projects, and might introduce extra complexity and weird and forced code contortions. And simply because your app is an action game or something smaller than a warehouse business database client, it's not worth the hassle. I’m not going to argue with you: IMHO it is a design pattern that grew out of Enterprise-class Windows apps that talk to databases or web services and display lots of forms and text on-screen in boring columns. 
However, you should be able to see how separating the user interface code from the code that makes decisions / queries the web / generates fractals / does whatever your app does, has many benefits. How nice it would be to hand the entire UI to someone else to beautify, and know it'll still integrate perfectly into the rest of the project when you get it back. Even for smaller apps, keeping your code arranged by function, rather than stuffed into a single class, pays off when it comes to debugging, testing and re-use. I guess my point is that you shouldn't write it off as not relevant to your apps. I hope this brief introduction has been useful and understandable. Next time you select File / New / Project from your Visual Studio menu, have a think about how using a design pattern might help you. Update: Now that Universal apps are a thing, MVVM can be a handy way to isolate your user interface from your app code, making it easier to write common classes that interact with two different UI/UX models - one for phone, one for.. um.. non-phone devices like laptops, tablets etc. Just sayin'. See Also I found a lot of useful information on design patterns and MVVM on MSDN, including:
Robert Green's excellent Channel 9 video, which made it clear for me
Robert's associated blog and sample code
MVVM from the Patterns and Practices Group
MVVM and WPF
http://blogs.msdn.com/b/johnkenn/archive/2014/01/22/mvvm-for-noobs-ios-devs-and-bluffers.aspx
CC-MAIN-2014-42
en
refinedweb
In an attempt to come up with a more interesting and elaborate example of a Dumbo program than the ones that can be found here, I wrote a simple recommender for Hadoop JIRA issues yesterday. This is how I obtained the data for this recommender:

$ wget -O jira_comments.xml ':'\
'searchrequest-comments-rss/temp/SearchRequest.xml?pid=12310240'
$ grep '<guid>' jira_comments.xml | sed 's/^.*HADOOP-\([0-9]*\).*$/\1/' > issues.txt
$ grep '<author>' jira_comments.xml | sed 's/^.*\(.*\).*$/\1/' > authors.txt
$ paste issues.txt authors.txt | egrep -v "Hudson|Hadoop QA" > comments.txt

These commands lead to a file comments.txt that lists the comment authors for each JIRA issue. From this file, we can compute Amazon-style recommendations of the form "people who commented on HADOOP-X, also commented on HADOOP-Y and HADOOP-Z" using the following Dumbo program:

from dumbo import sumreducer, nlargestreducer, nlargestcombiner, main
from heapq import nlargest
from math import sqrt

def mapper1(key, value):
    issuenr, commenter = value.split("\t")
    yield (int(issuenr), commenter), 1

def mapper2(key, value):
    yield key[0], (value, key[1])

def reducer2(key, values):
    values = nlargest(1000, values)
    norm = sqrt(sum(value[0]**2 for value in values))
    for value in values:
        yield (value[0], norm, key), value[1]

def mapper3(key, value):
    yield value, key

def mapper4(key, value):
    for left, right in ((l, r) for l in value for r in value if l != r):
        yield (left[1:], right[1:]), left[0]*right[0]

def mapper5(key, value):
    left, right = key
    yield left[1], (value / (left[0]*right[0]), right[1])

def runner(job):
    job.additer(mapper1, sumreducer, combiner=sumreducer)
    job.additer(mapper2, reducer2)
    job.additer(mapper3, nlargestreducer(10000), nlargestcombiner(10000))
    job.additer(mapper4, sumreducer, combiner=sumreducer)
    job.additer(mapper5, nlargestreducer(5), nlargestcombiner(5))

if __name__ == "__main__":
    main(runner)

Summing it up in one sentence, this program generates a similarity value for each pair of issues that have at least one comment author in common by computing the cosine similarity between the vectors consisting of the comment counts for each possible comment author, and then retains the five most similar issues for each issue. It might look a bit complicated at first, but it really is very simple. None of the code in this program should be difficult to understand for people who are already somewhat familiar with Dumbo (because they read the short tutorial, for instance), except for the probably not very well known functions sumreducer, nlargestreducer, and nlargestcombiner maybe. The names of these functions should be rather self-explanatory though, and if that's not enough then you can still have a look at their nice and short definitions:

def sumreducer(key, values):
    yield key, sum(values)

def nlargestreducer(n, key=None):
    def reducer(key_, values):
        yield key_, heapq.nlargest(n, itertools.chain(*values), key=key)
    return reducer

def nlargestcombiner(n, key=None):
    def combiner(key_, values):
        yield key_, heapq.nlargest(n, values, key=key)
    return combiner

These are not the only convenient reducers and combiners defined in dumbo.py by the way. You might also want to have a look at sumsreducer, nsmallestreducer, nsmallestcombiner, statsreducer, and statscombiner, for instance. A local run of the program on a single UNIX machine required 14 minutes to complete, while it took only 4 minutes on a modest 8-node Hadoop cluster.
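To make the cosine similarity measure concrete, here is a small standalone illustration in plain Python. It is not part of the Dumbo job, and the author names and counts are made up:

from math import sqrt

def cosine_similarity(counts_a, counts_b):
    # Dot product over the comment authors the two issues have in common.
    dot = sum(counts_a[author] * counts_b[author]
              for author in counts_a if author in counts_b)
    norm_a = sqrt(sum(c ** 2 for c in counts_a.values()))
    norm_b = sqrt(sum(c ** 2 for c in counts_b.values()))
    return dot / (norm_a * norm_b)

# Hypothetical comment counts per author for two issues.
issue_a = {"alice": 3, "bob": 1}
issue_b = {"alice": 2, "carol": 4}
print(cosine_similarity(issue_a, issue_b))  # roughly 0.42

The Dumbo program above computes this same kind of value for every pair of issues that share at least one commenter, just spread out over several map/reduce iterations so that it scales.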
That timing comparison already illustrates the benefits of Hadoop quite nicely, even though the consumed dataset is rather tiny. After using dumbo cat to save the computed output to recs.txt, you can then generate recommendations as follows:

$ grep '^4304' recs.txt | \
sed "s/^\([0-9]*\)/If you're interested in HADOOP-\1, you might also like:/" | \
sed "s/(\([^,]*\), \([0-9]*\))/\nHADOOP-\2\t(\1)/g"
If you're interested in HADOOP-4304, you might also like:
HADOOP-5252 (0.92827912163291404)
HADOOP-4842 (0.92827912163291404)
HADOOP-1722 (0.86128713072409002)
HADOOP-150 (0.371884218998905)
HADOOP-567 (0.37146329728054306)

In this case, the recommender claims that people who are interested in HADOOP-4304 ("Add Dumbo to contrib") might also be interested in
- HADOOP-5252 ("Streaming overrides -inputformat option"),
- HADOOP-4842 ("Streaming combiner should allow command, not just JavaClass"), and
- HADOOP-1722 ("Make streaming to handle non-utf8 byte array"),
which makes sense to me. Since the comments data is very sparse, the recommendations might not always make this much sense, but that's not the point of this post really. Instead, what you should remember from all this is that Dumbo allows you to write a scalable item-to-item collaborative filtering recommender by implementing six very simple generator functions, the longest one consisting of merely four lines of code. Nice!!

Just wanted to thank you guys for the great work on Dumbo. I did a similar Python example, but I didn't use Dumbo because it isn't supported yet on AWS. I'm pushing to get Dumbo included in the next release of Elastic MapReduce, it should be easier when AWS moves beyond 18.3, -Pete

Glad you like Dumbo, Pete. It would definitely be great if AWS would support it, and future versions of Hadoop (>= 0.21) should indeed make that easier since they won't need any patching to make Dumbo work.

Actually for those interested in finding a professional solution for automatic issue recommendation for JIRA there is a SuggestiMate plugin for JIRA which smoothly integrates with JIRA and does exactly this: finds and recommends potentially duplicate or similar issues.
http://dumbotics.com/2009/02/24/recommending-jira-issues/
CC-MAIN-2014-42
en
refinedweb
I'm going to do a walkthrough of my problem. It involves the scaffolding. I was trying to scaffold the basic part of a forum, and I'm going to show you exactly what I did. I started by opening cmd and typing:

rails forums

It created the first part. I then created the database. Had no problem doing this. The database.yml file is perfect as well. This is the database, though.

PHP Code:
CREATE TABLE `forums` (
  `id` int(11) NOT NULL auto_increment,
  `title` varchar(255) collate latin1_general_ci NOT NULL default '',
  `description` text collate latin1_general_ci NOT NULL,
  PRIMARY KEY (`id`)
) ;

After "Everything" was done, this is where it runs into problems. I ran scaffold; I typed:

ruby script/generate scaffold forums

It created the controllers and everything. I then went to routes.rb and edited it. I deleted the index in the public html.

PHP Code:
# You can have the root of your site routed by hooking up ''
# -- just remember to delete public/index.html.
map.connect '', :controller => "forums"

# Allow downloading Web Service WSDL as a file with an extension
# instead of a file named 'wsdl'
map.connect ':controller/service.wsdl', :action => 'wsdl'

# Install the default route as the lowest priority.
map.connect ':controller/:action/:id'
end

I then ran:

ruby script/server

It then gave this error and I gave this to the IRC channel for Rails. They couldn't even help me. I posted my forums_controller.rb, here it is:

PHP Code:
class ForumsController < ApplicationController
  def index
    list
    render :action => 'list'
  end

  # GETs should be safe
  verify :method => :post, :only => [ :destroy, :create, :update ],
         :redirect_to => { :action => :list }

  def list
    @forums_pages, @forums = paginate :forums, :per_page => 10
  end

  def show
    @forums = Forums.find(params[:id])
  end

  def new
    @forums = Forums.new
  end

  def create
    @forums = Forums.new(params[:forums])
    if @forums.save
      flash[:notice] = 'Forums was successfully created.'
      redirect_to :action => 'list'
    else
      render :action => 'new'
    end
  end

  def edit
    @forums = Forums.find(params[:id])
  end

  def update
    @forums = Forums.find(params[:id])
    if @forums.update_attributes(params[:forums])
      flash[:notice] = 'Forums was successfully updated.'
      redirect_to :action => 'show', :id => @forums
    else
      render :action => 'edit'
    end
  end

  def destroy
    Forums.find(params[:id]).destroy
    redirect_to :action => 'list'
  end
end

Then they asked me to post the forums.rb, so here it is.

forums.rb
PHP Code:
class Forums < ActiveRecord::Base
end

What is wrong, guys? I get that error every time I do something.
http://www.sitepoint.com/forums/printthread.php?t=408572&pp=25&page=1
CC-MAIN-2014-42
en
refinedweb
Hello, list! As S.G on November 7, I am also having trouble with building a DLL from the C++ interface under Windows, since I recently switched to MSVC 8 (the 2005 edition). However, I did succeed when I was using the VC 6.0 version, and I actually wrote down a recipe (below). There is a hack, though, related to the off_t __int64 definition, which I think invalidates the large file support. With the newest MS compiler, I thought I could get the off_t to work properly, that is, skip the ugly hack. So far no success, so I think I'll just use my former solution unless someone can guide me around the problem. Which is that netcdf.h states: #define off_t __int64 whereas in the file wchar.h, which appears to be included deep down in some dependencies (haven't explored the #include chain) the definition goes: #ifndef _OFF_T_DEFINED typedef long _off_t; /* file offset value */ #if !__STDC__ /* Non-ANSI name for compatibility */ typedef long off_t; #endif #define _OFF_T_DEFINED #endif ...Which generates the following errors: 1>c:\program files\microsoft visual studio 8\vc\include\wchar.h(485) : error C2632: 'long' followed by '__int64' is illegal 1>c:\program files\microsoft visual studio 8\vc\include\wchar.h(485) : warning C4091: 'typedef ' : ignored on left of 'long' when no variable is declared ...occuring under compilation of netcdf.cpp (ncvalues.cpp compiles OK). Obviously, the definitions crash. I've tried to switch places of some #include's and also tried to #undef off_t, but preprocessor directives are not what I know best. Here is what worked in VC 6.0. It also compiles under MSVC 8.0 (2005), but I haven't tested the functionality. The oold one worked fine. I have not tried MSVC 7.0 (2003). (Most of the text below was written a year ago, for VC60 and NetCDF 3.6.1.beta3) Being a slave of MS and GUI, I managed to get the netCDF C++ interface to compile under Windows, from "within" MS Visual Studio 6.0. (That is, using the GUI and wizards, no makefiles or configure tricks.) I have not compiled the whole C library, but using the pre-built netCDF works fine. I do not know if this is the only or even best solution, but it works. I had to do a modification which I guess disqualifies me from using the >2GiB files, and I have no idea of what happens if I try reading a larger file. 1: Get the 3.6.1 prebuilt binaries from the <> WEB site. This does not include the C++ interface. 2: Copy the .exe, .dll and .lib files to the directory where you put your compiled applications. 3: Find, on the same site, the full source distribution This contains both the netCDF C code (in \libsrc) and the C++ interface code (in \cxx). 4: Build a new, empty C++ dll project using the Visual Studio wizard. (I call this netCDFsh) 5: Copy the files \netcdf-3.6.1\src\cxx\ncvalues.cpp \netcdf-3.6.1\src\cxx\ncvalues.h \netcdf-3.6.1\src\cxx\netcdf.cpp \netcdf-3.6.1\src\cxx\netcdfcpp.h and \netcdf-3.6.1\libsrc\netcdf.h to your \netCDFsh project directory, and include them in your project. 6: Be sure to include the definition DLL_NETCDF in the Project Settings, tab C/C++, Category Preprocessor. 7: Still in Project Settings, tab Link, category Input, add netcdf.lib under Object/library modules, and the path where you put it, under Additional Library Path. 8: Still in Project Settings, specify your output path to the directory where you keep your binary executables (and where you put the pre-built netcdf files). 
9: In the netcdf.h, comment out the #define off_t __int64 on (or around) line 212, and add a comment at the file top that you did so. This is because VC60 doesn't react well to 64 bit integers, and means that you will not be able to use this extended functionality of netCDF. NB! Beware that you now change the code behind the pre-built C netCDF.dll, so keep a backup of the original file in case you want to try compiling this library yourself. NB2: I have absolutely no idea which functions in the C++ interface will not work without this declaration of off_t. 10: In the netcdfcpp.h file, add the declaration __declspec(dllexport) to the class NcFile declaration, so that it reads: class __declspec(dllexport) NcFile Repeat this for all the other classes in this file. This is because you compile the C++ interface into a new dll, and need the compiler to export these classes to a .lib file. 11: Done. Build the netCDFsh dll, and verify that netCDF.dll and netCDF.lib appear in your executable directory. 12: Code your application. Link in both netCDF.lib and netCDFsh.lib, #include "netCDFsh\netCDFcpp.h" and you should be able to construct NcFile objects and extract information from the files just by ordinary class member notation. I guess it is possible to take all the four C++ files directly into your application, so you can do without the __declspec's and the netCDFsh dll, but I prefer to separate my own from other people's code. Good luck! Sjur :-) On Tue, 07 Nov 2006 07:27:41 -0700, Ed Hartnett wrote: > >Howdy! > >The challenge is, no one has ever ported the C++ API to the windows >programming environment. > >As you noted, I did port the C API to MS Visual Studio, and that >works. > >But I have never had the time or the strong need to put the C++ API >under Visual Studio. I don't know how hard it would be to get it >working, perhaps easy, perhaps not, however I am sure that I will not >be able to try to tackle that problem any time soon. > >You could just use the C library from C++ on widows, or you can try to >get a visual studio solution together for the C++ API, which would >then produce a C++ library DLL. If you do this, please share your >results with me, and I can make it available in future distributions. > >Thanks! > >Ed > Sjur Kolberg SINTEF Energiforskning A/S Sem Sælands vei 11 7465 Trondheim tlf. 73 59 72 78 fax 73 59 72 50
http://www.unidata.ucar.edu/mailing_lists/archives/netcdfgroup/2006/msg00230.html
CC-MAIN-2014-42
en
refinedweb
I would like to know if there is a way to load the positions for an object from an external file, for each frame.

You could certainly do this if you wrote your own Python script. hmmm… pseudocode might look something like:

import Blender
text = "wherever the text file you want to read is"
delimiter = "whatever you want to split the file on, to separate one frame's data from the next, e.g. a ';', a '~', a ',', etc."
frame = Blender.Get("curframe")
keyframe = (set keyframe API here)

the next step would be to read the data and set keyframes based on the text file

I don't know if that made any sense :eek: please let me know if that helped or just confused things. Thanks -amdbcg
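For reference, here is a minimal sketch of the same idea against the current bpy API (Blender 2.8 and later). The file path, the one-line-per-frame "frame,x,y,z" format, and the object name are all assumptions:

import bpy

path = "/tmp/positions.csv"      # hypothetical file, one "frame,x,y,z" line per frame
obj = bpy.data.objects["Cube"]   # assumed object name

with open(path) as f:
    for line in f:
        frame, x, y, z = [float(v) for v in line.split(",")]
        obj.location = (x, y, z)
        # Record a location keyframe on that frame.
        obj.keyframe_insert(data_path="location", frame=int(frame))

Run from Blender's text editor or Python console, this reads the file once and bakes one keyframe per line, so the object follows the imported positions on playback.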
https://blenderartists.org/t/external-source/452914
CC-MAIN-2021-04
en
refinedweb
So I randomly found this useful script on the internet (blenderist.com). It's for marker renaming, and I'm going to create an add-on based on this script. But I want it to be activated using a button, somewhere near the refresh sequencer button. Can somebody help me with how to do it? I hope that I can understand a little bit about logic and scripting in Blender. I really appreciate it. Thanks a ton!

import bpy

i = 1
for marker in bpy.context.scene.timeline_markers:
    marker.name = str(i)
    i = i + 1
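One common approach (a sketch only, with made-up class names and IDs, using the 2.8+ API) is to wrap the loop in an operator and then draw a button for it in the Sequencer UI, for example in its header. Placing it exactly next to the built-in refresh button is not something the UI exposes directly, but a header button gets close:

bl_info = {"name": "Marker Renamer", "blender": (2, 80, 0), "category": "Sequencer"}

import bpy

class SEQUENCER_OT_rename_markers(bpy.types.Operator):
    """Rename all timeline markers to sequential numbers"""
    bl_idname = "sequencer.rename_markers"   # hypothetical id
    bl_label = "Rename Markers"

    def execute(self, context):
        for i, marker in enumerate(context.scene.timeline_markers, start=1):
            marker.name = str(i)
        return {'FINISHED'}

def draw_button(self, context):
    # Adds the button to the Sequencer header.
    self.layout.operator(SEQUENCER_OT_rename_markers.bl_idname)

def register():
    bpy.utils.register_class(SEQUENCER_OT_rename_markers)
    bpy.types.SEQUENCER_HT_header.append(draw_button)

def unregister():
    bpy.types.SEQUENCER_HT_header.remove(draw_button)
    bpy.utils.unregister_class(SEQUENCER_OT_rename_markers)

if __name__ == "__main__":
    register()

Save this as a .py file and install it through Edit > Preferences > Add-ons, or run it from the text editor while experimenting.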
https://blenderartists.org/t/teach-me-how-to-create-marker-renamer-add-on/647683
CC-MAIN-2021-04
en
refinedweb
Registering your handler You can register the script that handles warmup requests in your project's app.yaml file. For example: inbound_services: - warmup handlers: - url: /_ah/warmup script: main.py login: admin This example registers a handler to listen to warmup requests to the /_ah/warmup request path with the main.py file. Creating your handler Create a handler that will process the requests that are sent to /_ah/warmup. Your handler should perform any warmup logic that is needed by your app. The following example builds on the previous example: import webapp2 class MyWarmUpCode(webapp2.RequestHandler): """ This class handles the warmup request. You should add any code that you need to execute in the `get` method, such as populating caches, and ensure that you return a successful HTTP response. """ def get(self): # Your warmup logic goes here. # Return a successful response to indicate the logic completed. self.response.headers['Content-Type'] = 'text/plain' self.response.write('Warmup successful') # ... application = webapp2.WSGIApplication( [ ('/_ah/warmup', MyWarmUpCode), # Other handlers # ... ] ) What's next You might want to store values in an in-memory datastore such as Memcache, giving your app fast, query-less access to data. For example, if you build and store a list of the current trending articles for your site, you can build that list in the warmup and store it in Memcache. When a user request comes in, App Engine doesn't need to perform any datastore queries and the application can serve the user's request faster.
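As a rough sketch (not taken from the official docs), the warmup handler above could populate such a cache like this; the key name, the one-hour expiry, and the build_trending_articles helper are placeholder assumptions:

import webapp2
from google.appengine.api import memcache

def build_trending_articles():
    # Placeholder for whatever datastore queries build the trending list.
    return ['article-1', 'article-2']

class WarmupHandler(webapp2.RequestHandler):
    def get(self):
        # Build the expensive list once, during warmup...
        articles = build_trending_articles()
        # ...and cache it so user-facing requests can skip the queries.
        memcache.set('trending_articles', articles, time=3600)
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write('Warmup successful')

A user-facing handler would then call memcache.get('trending_articles') and fall back to the datastore only on a cache miss.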
https://cloud.google.com/appengine/docs/standard/python/configuring-warmup-requests?hl=no
CC-MAIN-2021-04
en
refinedweb
Compiling the below code in gcc (gcc -Wall foo.c) yields the following warning message: foo.c: In function 'main': foo.c:9: warning: implicit declaration of function 'strlen' foo.c:9: warning: incompatible implicit declaration of built-in function 'strlen' When I compile using Intel C++ compiler (icpc -Wall foo.c) I get the below error message: foo.c(9): error: identifier "strlen" is undefined len = strlen(foo); When I compile using Intel C compiler (icc -Wall foo.c) no warning messages are displayed. Why is this the case? #include <stdlib.h> int main (int argc, char **argv) { char foo[] = "abcd"; int len; len = strlen(foo); return 0; } We don't have the exact same warnings as gcc, though we try. If you want to see the message about strlen's declaration when using icc, use this option: -ww:266 icc -c -ww:266 fu.c fu.c(24): warning #266: function "strlen" declared implicitly len = strlen(foo); If you use either g++ or icpc to compile it, the fact that strlen isn't declared is emitted as an error, no implicit declaration for strlen in C++. --Melanie Thanks Melanie for the response on warning compatibility with ICC. Just to elaborate further, ICC is command line, source and binary compatible. Although not warning to warning compatible with GCC, ICC often produces a high number of warnings (instead of silently accepting) thereby detecting source oddities that can end up as a potential bug. The user can then decide on whether the warning is an issue or not and disable accordingly. _Kittur
https://community.intel.com/t5/Intel-C-Compiler/implicit-declaration-warning-not-produced-using-C-compiler/m-p/1010196
CC-MAIN-2021-04
en
refinedweb
Using requests in Python 3 on Windows via PyCharm, and receiving an "SSL Module Not Available" error. I've spent hours trying to figure out what could be causing this. I've reinstalled Anaconda, and I am completely stuck. When running the following:

import requests

def Earlybird(daycount):
    url = ''
    response = requests.get(url)
    print(response)

Earlybird()

I receive the error:

requests.exceptions.SSLError: HTTPSConnectionPool(host='msft.com', port=443): Max retries exceeded with url: / (Caused by SSLError("Can't connect to HTTPS URL because the SSL module is not available."))

I have no idea what could be causing this. As Anaconda is a recent install, I would assume everything would be up to date, so I'm unsure if this is pointing to the wrong SSL? I am a bit new to Python, so thanks for your patience. – Kenil Vasani

Don't know if this has been solved yet, but I was getting similar problems with Anaconda Python 3.7.3 and IDLE on Windows 10. Fixed it by adding: to the PATH variable.
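A quick way to confirm the diagnosis (a generic check, independent of requests) is to ask the interpreter that PyCharm is configured to use whether it can load the SSL module at all:

# Run this with the same interpreter PyCharm / Anaconda is using.
import sys
print(sys.executable)    # shows which python is actually running

import ssl               # fails with an ImportError if the SSL module is unavailable
print(ssl.OPENSSL_VERSION)

If the import fails, the problem lies in the interpreter's environment (on Windows, typically the OpenSSL DLLs not being found on PATH) rather than in requests itself.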
https://www.errorcorner.com/question/requests-caused-by-sslerrorcant-connect-to-https-url-because-the-ssl-module/
CC-MAIN-2021-04
en
refinedweb
How to Mock Environment Variables in pytest2020-10-13 Sometimes tests need to change environment variables. This is fairly straightforward in pytest, thanks to os.environ quacking like a dict, and the unittest.mock.patch.dict decorator/context manager. (If you’re not using pytest, or use TestCase classes with pytest, see the unittest edition of this post.) mock.patch or monkeypatch ? pytest comes with a monkeypatch fixture which does some of the same things as mock.patch. This post uses mock.patch, since it’s a more powerful and general purpose tool. But you might prefer monkeypatch - check out the monkeypatch documentation for environment variables. Adding Environment Variables If you want to write a test that sets one or more environment variables, overriding existing values, you can use mock.patch.dict like this: import os from unittest import mock from example.settings import Colour, get_settings @mock.patch.dict(os.environ, {"FROBNICATION_COLOUR": "ROUGE"}) def test_frobnication_colour(): colour = get_settings().frobnication_colour assert colour == Colour.ROUGE You can apply this to all tests in a module by creating a local auto-used pytest fixture that uses mock.patch.dict: import os from unittest import mock import pytest @pytest.fixture(autouse=True) def mock_settings_env_vars(): with mock.patch.dict(os.environ, {"FROBNICATION_COLOUR": "ROUGE"}): yield def test_frobnication_colour(): assert os.environ["FROBNICATION_COLOUR"] == "ROUGE" def test_frobnication_shade(): assert os.environ["FROBNICATION_COLOUR"] == "ROUGE" Dynamic Values If you don’t know the keys or values you want to mock at import time, you’ll need to use the context manager form of mock.patch.dict within your test function: import os from unittest import mock from example.settings import Colour, get_settings from tests.fixtures import get_mock_colour def test_frobnication_colour(): with mock.patch.dict(os.environ, {"FROBNICATION_COLOUR": get_mock_colour()}): colour = get_settings().frobnication_colour assert colour == Colour.ROUGE Clearing If you want to clear everything from os.environ so only the given variables are set, you can do so by passing clear=True to mock.patch.dict: import os from unittest import mock from example.settings import get_settings @mock.patch.dict(os.environ, {"FROBNICATION_COLOUR": "ROUGE"}, clear=True) def test_frobnication_colour(): settings = get_settings() assert settings.modified_names == {"FROBNICATION_COLOUR"} Removing If you want to remove only a few variables, it gets a little more tricky. mock.patch.dict doesn’t have a way of removing select keys, so you need to build a dictionary of the keys to preserve, and use that with clear=True: import os from unittest import mock from example.settings import get_settings def test_frobnication_colour(): names_to_remove = {"FROBNICATION_COLOUR"} modified_environ = {k: v for k, v in os.environ.items() if k not in names_to_remove} with mock.patch.dict(os.environ, modified_environ, clear=True): settings = get_settings() assert settings.modified_names == set() Fin I hope this helps you with your testing journey, —Adam Working on a Django project? Check out my book Speed Up Your Django Tests which covers loads of best practices so you can write faster, more accurate tests. One summary email a week, no spam, I pinky promise. Related posts: Tags: python
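Postscript: the simple cases can also be written with the monkeypatch fixture mentioned at the start. A sketch reusing the same FROBNICATION_COLOUR variable from the examples above:

import os

def test_frobnication_colour(monkeypatch):
    # monkeypatch undoes the change when the test finishes.
    monkeypatch.setenv("FROBNICATION_COLOUR", "ROUGE")
    assert os.environ["FROBNICATION_COLOUR"] == "ROUGE"

def test_frobnication_colour_unset(monkeypatch):
    # raising=False means it is fine if the variable was never set.
    monkeypatch.delenv("FROBNICATION_COLOUR", raising=False)
    assert "FROBNICATION_COLOUR" not in os.environ

mock.patch.dict remains the more natural fit for the clear=True and bulk-removal cases shown above.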
https://adamj.eu/tech/2020/10/13/how-to-mock-environment-variables-with-pytest/
CC-MAIN-2021-04
en
refinedweb
Hello, does PyCharm support IronPython code completion, namespace import and so on for IronPython code? Recently there was an announcement of IronPython support, but I can't see it in the editor. What can I do to get IronPython code completion and namespace import in the PyCharm editor? I use PyCharm as a powerful external editor for IronPython scripts, and a second application is then used to run the scripts.

Hello Vitalik, in order for all of these features to work, you need to put the caret on a CLR import reference (for example, 'import System') and invoke the quickfix to generate stubs for the .NET assembly. -- Dmitry Jemerov, Development Lead, JetBrains, Inc. "Develop with Pleasure!"
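For example (an illustrative snippet only, not taken from the JetBrains documentation), a typical IronPython script where the generated stubs matter might look like this; once stubs have been generated for the referenced assembly, completion should become available for the imported .NET names:

import clr
clr.AddReference("System.Windows.Forms")   # example .NET assembly

from System.Windows.Forms import MessageBox

MessageBox.Show("Hello from IronPython")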
https://intellij-support.jetbrains.com/hc/en-us/community/posts/206583835-Iron-Python
CC-MAIN-2021-04
en
refinedweb
Versioning The versioning bundle will create snapshots for every file, upon every update received, or when it is deleted. It is useful when you need to track the history of files or when you need a full audit trail. Installation To activate versioning simply add Versioning to Raven/ActiveBundles configuration in the global configuration file or setup a new file system with the versioning bundle turned on using API or the Studio. Learn how to create a file system with versioning enabled using the Studio here. Configuration By default, the Versioning bundle will track history for all files and never purge old revisions. This may be easily configurable by changing appropriate configuration item: await store .AsyncFilesCommands .Configuration .SetKeyAsync( "Raven/Versioning/DefaultConfiguration", new FileVersioningConfiguration { Id = "Raven/Versioning/DefaultConfiguration", MaxRevisions = 10 }); Such a default configuration will setup the versioning bundle to version all files ( Exclude = false) and keep only up to 10 revisions ( MaxRevisions = 10). It is possible to override the default behavior for files located in a particular directory. For example, let's say that we don't want to version files in /documents/temp folder. To achieve that we need to set the following configuration: await store .AsyncFilesCommands .Configuration .SetKeyAsync( "Raven/Versioning/documents/temp", new FileVersioningConfiguration { Id = "Raven/Versioning/documents/temp", Exclude = true }); This will cause that no file located under specified directory and its subfolders will be versioned because Exclude = true. The configuration naming convention is Raven/Versioning/[directory/path]. You can create multiple configurations for different nesting levels, the versioning bundle will look for the most specific one. For example the above versioning configuration will disable versioning for files under /documents/temp/drafts too. However you can also set the following configuration: await store .AsyncFilesCommands .Configuration .SetKeyAsync( "Raven/Versioning/documents/temp/drafts", new FileVersioningConfiguration { Id = "Raven/Versioning/documents/temp/drafts", Exclude = false, MaxRevisions = 5 }); to enable versioning for this folder. There will be created no more than 5 revisions for files from this directory. Apart from MaxRevisions there are a few more options: - ExcludeUnlessExplicit set to truedisables versioning for impacted files unless file metadata at the time of saving contains the key Raven-Create-Version. This key is transient and it is removed from metadata before put. Default: false. - PurgeOnDelete determines whether revisions should be deleted if a related file is deleted. Default: false. - ResetOnRename indicates if the versioning should be reset on a file rename. Default: true, means that the last existing revision will become first revision while the other ones will be deleted. If you set this option to falsethen revisions will be renamed according to the new name of the related file. Client integration The versioning bundle also has a client side part, which you can access by adding Raven.Client.FileSystem.Bundles.Versioning namespace. Then, you can access past revisions of a file using the following code: using (var session = store.OpenAsyncSession()) { FileHeader[] revisions = await session .GetRevisionsForAsync( "/documents/temp/drafts/1.txt", start: 0, pageSize: 5); }
https://ravendb.net/docs/article-page/3.5/python/file-system/server/bundles/versioning
CC-MAIN-2021-04
en
refinedweb
Newton Excel Bach is aimed at people who work with numbers without a $ on the front. Posts will alternate between Excel and engineering/science topics, with the occasional post related to neither when I feel like it. Contact e-mail: [email protected] About Interactive Design Services Interactive Design Services is a Sydney based structural design consultancy specialising in the analysis, design and review of bridges, buried structures, retaining walls and related civil engineering structures. More details may be found at: Interactive Design Services A good day ; I want to say that what is here is the produce of a shinely mind . I have a very big need of your help ! … I try to get the solution for an UDF which work in array ; how can I contact you , if you allow me ? Please very much for help , I have a very big need to have this UDF , and it seems nobody can give me the right solution anymore ; I put here a link where is described all my problem : The thread have name UDF …VBA Formula built …please help , username ytayta555 ; Thank you very much , and I wait impatient your big help for me . Respectfully Dombi – thanks for your comments. I have left a reply in the thead you linked to. I didn’t expect to get response from a great personality like you , mr. Doug Jankins , I’m really surprised . Great atitude … Now , my problem is solved 98% ; I hope and please you to help me and to resolve the last part of 2% . Really , I just wonder what can I do to can do for you what you just done fore me . What a benefit for humanity to have so shinely and creative minds … Hi. Hi João – I’m not exactly sure how your line of passes system would work at the moment, but if you would like to send some sketches and/or worksheets to my Gmail account (Dougaj4 at the usual gmail) I will have a look at them when I have time (pretty busy at the moment, so it may not be straight away). I’d be interested to see your line length code as well if you would like to send it. Thanks for the kind comments. Doug Dear Mr Jenkins, We identified your site as one of the leading information sources on the Internet about Microsoft Excel software. We would like to take this opportunity to introduce you our site, SpreadsheetZONE. SpreadsheetZONE, a content partner of Microsoft Office Online, is a growing repository of free online Microsoft Excel templates available for everyone for personal or business usage. Most of the templates are developed by SpreadsheetZONE Team, in various categories including finance, sales, education, and project management. Our library grows everyday as new templates are published, and number of our users keeps increasing. Please visit our site at and let us know what you think. We always welcome suggestions from Excel community to improve our site and services. Thank you, SpreadsheetZONE Team HiF. +1 Hi Beatrix – thanks for the comments. Could you provide a bit more detail about what you are wanting to do with the intersection calculation, and what data you are working with? There is a spreadsheet on the blog that calculates intersections of lines and/or circles (IP.xls) which may help, but assuming you are wanting to calculate if a moving ball will intersect the path of a moving player it may not be so straightforward. I’d be interested to see your code. If you would like to e-mail to [email protected] I will have a look. Doug I’m a civil engineer in the states and a programming enthusiast. Have you ever thought about setting up a repository for user-submitted code, feedback, or requests? 
I’m sure a collection of your readers would be interested in the challenge, including myself. Good idea! I’ll set up a separate page for readers programs and ask for submissions when it is ready. Happy New Year! Regards Alfred You should consider adding Ron DeBruin’s web page to you list. Happy New Year! Regards Alfred Hi Doug, Excellent job on the blog. I find it really helpful and educative, I can learn a lot here. FYI, I also work at engineering consultant specializing in civil engineering works such as structural assessment and design of buildings, bridges, etc. It is based in Jakarta, Indonesia. When i have free time, i sometime try to make a useful spreadsheet such as beam/column/others design (according to Indonesian building code), ETABS/SAP output processor, etc. Currently I am working on Bill of Quantity spreadsheet to calculate material quantity (concrete and steel) with a good precision (detail calculation) to be used in Value Engineering works. Unfortunately i have limited knowledge on VBA programming so i keep stumbling on a problem from time to time. Hopefully i can “steal” your time once in a while to discuss about it. I like this blog and i’ll be glad to take an active role here. I’d gladly share my knowledge and my spreadsheets if it can be of any help here. Regards Bob …this blog helped me develop my skills in VBA in relation to structural engineering, big thanks to mr. Jenkins.. I am also a programming enthusiast here in the Philippines and I was hoping to find a company who gives interest with this kind of trade.. Thank you for taking the time and energy to release this very high quality information. I really enjoy reading your posts and following along. Your code is as previously mentioned is of very high quality. Your comment sections are simply lovely. Very bright and intelligent spot on comments. As someone stated previously Shiny! Doug – Great blog with tons of interesting info. Thanks for posting regularly. Excel is a great tool for most “day-to-day” math and your blog provides several useful aspects. All the best! Doug, you have a lot of great information here. I was wondering under what terms you release your excel sheets and scripts? I am working on a column designer web app / side project of mine, and I would like to use some of your PM interaction code as reference when I write the backend algorithms. It will be server side (not publicly visible) and in a different language (go vs VBA). Would this be an acceptable use to you? Best Regards. Jeremy – Most of the spreadsheets have an “about” page with a licence and disclaimer statement. In summary this says you can use the code as you wish, but it comes with absolutely no warranty. Hi Doug, thanks for your amazing blog and the wealth of knowledge you share in your blog. I am a civil engineer from Singapore, and I’ll be starting work in the Melbourne Metro Tunnel Project (MTP) this August 2018. I do write some VBA codes and functions to help me with my work too. One Excel VBA function which I completed some time ago, which I found to be quite a challenge to create, was a “ReturnFindAddress” function, which would return (either as a string or an array) the cell addresses of the search results of a specified search string which I supply to the function. Anyway I just wanted to say Hi, and let you know that I appreciate your blog as it lets me know what is possible in Excel, especially in engineering applications. Cheers and have a good day! Thanks Jay, I hope all goes well in Melbourne! 
Hello Doug, Hope you are well. Noticed your replies about VBA and Python on Quora. Do you provide consulting? We have an Excel file that is taking a several seconds to compute, which creates a poor user experience. We have developed a lean Python script using Numpy, but we are having issues running the script in Excel (the Python pluggin is difficult to install). Could we have a call? Hope you can help. Best, Daniel P.S. – Here is our company website:
https://newtonexcelbach.com/about/?like_comment=3800&_wpnonce=1b1ae967b9
CC-MAIN-2021-04
en
refinedweb
Heads up! To view this whole video, sign in with your Courses account or enroll in your free 7-day trial. Sign In Enroll Preview String Concatenation6:39 with Jay McGavren You can join strings together using string concatenation. IRB - IRB stands for "Interactive RuBy". - You can run it by clicking in your console or terminal and typing irbat the system prompt. - When we run it, it'll show you a prompt where you can type Ruby expressions one at a time, hitting Enter after each. - IRB will immediately show you the result of each expression. You don't need to call putsor anything. 2.3.0 :001 > 1 + 2 => 3 2.3.0 :002 > Time.now => 2017-09-02 13:31:38 -0700 - When you're done and you're ready to exit IRB, type exitand press Enter. You'll be returned to the system prompt. 2.3.0 :003 > exit $ - IRB is a great way to try code out and see what it does, and even professional Ruby developers use it as a way to quickly test out ideas. String Concatenation So now that we know how irb works, let's use it to try out string concatenation. $ irb 2.3.0 :001 > "a" + "b" => "ab" 2.3.0 :002 > "some words" + "more words" => "some wordsmore words" 2.3.0 :003 > "some words" + " " + "more words" => "some words more words" 2.3.0 :004 > myvar = "a string" => "a string" - You can concatenate strings in variables 2.3.0 :005 > myvar + " abc" => "a string abc" - Concatenation gives a new string, it doesn't change the string in the variable 2.3.0 :006 > myvar => "a string" - To change the variable's value, use an abbreviated assignment operator, which we'll talk more about soon myva2.3.0 :007 > myvar += " abc" => "a string abc" 2.3.0 :008 > myvar => "a string abc" myva2.3.0 :009 > myvar += " def" => "a string abc def" myvar 2.3.0 :010 > myvar => "a string abc def" - Strings can only be concatenated together with other strings. Anything else, like a number, will result in an error. - We'll be showing you a solution for this shortly. 2.3.0 :001 > 1 + "a string" TypeError: String can't be coerced into Fixnum from (irb):1:in `+' from (irb):1 from /Users/jay/.rvm/rubies/ruby-2.3.0/bin/irb:11:in `<main>' 2.3.0 :002 > "a string" + 1 TypeError: no implicit conversion of Fixnum into String from (irb):2:in `+' from (irb):2 from /Users/jay/.rvm/rubies/ruby-2.3.0/bin/irb:11:in `<main>' Updating the widget store - Using string concatenation to fix our askmethod - We need to print a space following the question we ask the user - We can do this using string concatenation def ask(question) print question + " " gets end puts "Welcome to the widget store!" answer = ask("How many widgets are you ordering?") - Let's print what the user entered so they can confirm it's correct. answer = ask("How many widgets are you ordering?") puts "You entered" + answer + "widgets" - Output: You entered11 widgets - Oops! We need to add spaces surrounding answer, so fix that: puts "You entered " + answer + " widgets" - Output: You entered 11 widgets - You may be wondering why we didn't get an error, since strings can only be concatenated with other strings. The reason is, the value in the answervariable is a string. The getsmethod always returns strings. So even though the user entered a number, it's treated as a string. Eventually we'll have to convert it to an actual number, which we'll see how to do later. - It still skips to a new line after printing answer. That's something we'll have to fix later as well. 
In our widget store program, the question that we're asking the user is running 0:00 right up against the space where they're suppose to type their answer. 0:04 In order to fix this, we're going to be to take the question that 0:07 we're asking the user, and add a space on to the end of it. 0:10 We can do this through ruby string concatenation or the joining of strings. 0:13 We'll show you how to do string concatenation in our main program 0:18 in a bit, but first let's try it out in a different way. 0:22 I want to show you a separate program that gets installed along with Ruby called irb. 0:25 irb stands for interactive Ruby and we can launch it by clicking down in our console 0:31 and typing the letters irb and pressing Enter. 0:36 When we run irb it'll show you a prompt where you can 0:39 type Ruby expressions one at a time hitting Enter after each. 0:42 Irb will immediately show you the result of each expression, 0:50 you don't need to call puts or anything. 0:53 It's a great way to try code out and see what it does, and 0:55 even professional Ruby developers use it as a way to quickly test out ideas. 0:58 So now that we know how irb works, let's use it to try out string concatenation. 1:02 I'm gonna resize the console so that it has a little more room on the screen. 1:07 You concatenate strings together using the plus operator. 1:13 So let's try typing one string, the plus operator and 1:16 a second string that we wanna join onto it. 1:20 You can see the result is the concatenated string, ab. 1:22 Let's try that again with slightly longer strings. 1:26 So we'll try a string that consists of some words + more words. 1:29 And you can see that they got joined together without any spaces between them. 1:38 That's something you need to be careful of if you're using actual English words. 1:41 You need to be sure to include spaces in the concatenated version. 1:47 So we'll concatenate three strings together. 1:53 Our first string, a string consisting of a single space and our second string. 1:56 And now, everything's spaced properly. 2:03 If you concatenate one string onto another that's stored in a variable, 2:05 it won't affect the string that's stored in the variable. 2:09 Let's try creating a variable named myvar, and we'll store a string in it. 2:12 And now, let's try concatenating another 2:20 string onto the string in myvar, myvar + abc. 2:25 And you can see that the result is concatenated string, a string abc. 2:30 But if we take a look at the contents of myvar, which in irb you can just 2:35 type myvar and it will print what myvar contains for you. 2:40 You can see that myvar is ineffective, it still contains just a string. 2:46 To concatenate the string and actually change the value that's held in 2:51 the variable, we can use an abbreviated assignment operator. 2:54 We'll talk about those operators more later, but 2:58 let's just do a quick demonstration. 3:01 So myvar, and we use the abbreviated += 3:03 assignment operator, and we'll concatenate the same string on as we did before. 3:07 myvar += abc. 3:12 And you can see the result is a string abc. 3:14 But if we type just myvar to look at its contents, 3:17 we can see that its contents have been updated as well. 3:21 And if we did that again with a different string, if we say myvar += def, we can see 3:23 that another string has been concatenated on to the end of the first one. 3:30 And that the contents of myvar have been updated with that as well. 
3:34 We now have a string, the first concatenated string abc and 3:39 the second concatenated string def. 3:43 Strings can only be concatenated together with other strings. 3:46 Anything else like a number will result in an error. 3:50 So if we were to take the number 1 and 3:53 try to concatenate a string on to the end of it, we'll get an error. 3:56 We'll also get an error if we take a string and 4:02 try to concatenate a number onto that. 4:06 We'll be showing you a solution for this situation shortly. 4:09 When you're done and you're ready to exit irb, type exit and 4:12 press Enter, you'll be returned to the system prompt. 4:15 Let's resize our console window back where it was, and 4:18 bring our widgets.rb code back up. 4:23 So now let's see if we can use string concatenation to fix our ask method. 4:26 As we mentioned, the question that we're asking the user is running right up 4:30 against the space where they're supposed to type their response. 4:34 We can fix this by concatenating a space character on to the end of the question. 4:37 Let's try running this again now. 4:42 So we'll say ruby space widgets.rb. 4:45 And we'll get asked as before, how many widgets are you ordering? 4:51 But notice that there's now a space between the question and the cursor. 4:53 Now let's try typing our response as we did before, and 4:59 you'll notice that it's spaced properly now, thanks to string concatenation. 5:03 It looks like there's another improvement we can make here. 5:07 Right now, we're just printing out whatever the user enters with no 5:10 explanation. 5:13 So let's incorporate that into a more readable message. 5:14 Instead of puts answer, let's say, puts, 5:17 You entered, and concatenate that with answer, 5:22 and concatenate that with widgets. 5:27 So if they enter 8 widgets it'll say, you entered 8 widgets. 5:32 Let's try running this again. 5:37 But we noticed there is a problem. 5:42 We forgot to add spaces surrounding answer here so we wind up with 5:44 you entered running right up against the user's answer here in the output. 5:48 So let's go back into the code and add spaces surrounding the answer variable. 5:52 So You entered space, answer space widgets, and we'll enter 8 again. 5:57 There's a space here before the users answer, and we can see another space 6:05 down here on the second line, but why is there a line break in the middle of this? 6:09 You may also be wondering why we didn't get an error, 6:13 since strings can only be concatenated with other strings. 6:16 The reason is, the value in the answer variable is a string. 6:19 The gets method always returns strings, so 6:23 even though the user entered a number, it's treated as a string. 6:26 Eventually, we'll have to convert it to an actual number, 6:30 which we'll see how to do later. 6:32 We'll also see how to fix it skipping to a new line after printing the answer. 6:34
https://teamtreehouse.com/library/string-concatenation?t=0
CC-MAIN-2021-04
en
refinedweb
I'd like to encourage the general Flex community to take a second to build a quick custom preloader for the project they're currently working on. It takes only a minute or two, and it will distinguish your application from the sea of other Flex applications out there right from the start.

One of the easiest ways I've found to build a custom preloader for your Flex application is to start in Flash Professional. I usually just build a nice animation over 100 frames. If you are using some text that shows the amount loaded, you'll need to make sure you set it to dynamic text so that we can manipulate that information later. Make sure you export your custom preloader class for ActionScript. (I'm in CS3, and you just go to the Library, right-click on the preloader, and change the linkage to export for ActionScript.) Next, export that same Library preloader object as a SWC.

Now, over in Flex Builder, we'll need to import that SWC. You can do this by dropping the SWC in your libs folder, or you can specify the location of the SWC in the project properties section: File > Properties > Build Path > Library path > Add SWC.

Keeping this quick, I'd suggest just extending DownloadProgressBar. Your preloader class will be able to take advantage of some of the code that's already baked into that component. In the class constructor, add the custom preloader. When the 'added to stage' event fires, center the preloader and be sure to stop it from running.

We'll use the pre-baked 'set preloader' method to listen for some events. The framework passes in a reference to its preloader so that we can listen for events like progress and completion.

public override function set preloader(preloader:Sprite):void {
    preloader.addEventListener(ProgressEvent.PROGRESS, onProgress);
    preloader.addEventListener(FlexEvent.INIT_COMPLETE, intComplete);
}

After the application has loaded, we just need to fire the complete event. Otherwise our viewers will be able to see our preloader but never view the application itself.

protected function intComplete(event:FlexEvent):void {
    dispatchEvent(new Event(Event.COMPLETE));
}

We're also listening for the progress event. We use that to update the current frame of our Flash preloader; additionally, we can update our dynamic text field
(here called 'percent'):

protected function onProgress(event:ProgressEvent):void {
    customPreloader.percent.text = Math.ceil(event.bytesLoaded / event.bytesTotal * 100).toString() + '%';
    customPreloader.gotoAndStop(Math.ceil(event.bytesLoaded / event.bytesTotal * 100));
}

Here's the entire preloader class:

package com.alagad.view.preloader {

    import flash.display.Sprite;
    import flash.events.Event;
    import flash.events.ProgressEvent;

    import mx.events.FlexEvent;
    import mx.preloaders.DownloadProgressBar;

    public class RedPreloader extends DownloadProgressBar {

        protected var customPreloader:CustomPreloader;

        public function RedPreloader() {
            customPreloader = new CustomPreloader();
            addEventListener(Event.ADDED_TO_STAGE, onAdded);
            addChild(customPreloader);
        }

        protected function onAdded(event:Event):void {
            customPreloader.stop();
            // put it in the upper middle
            customPreloader.x = stage.stageWidth * 0.5 + 65;
            customPreloader.y = stage.stageHeight * 0.5 - 160;
        }

        public override function set preloader(preloader:Sprite):void {
            preloader.addEventListener(ProgressEvent.PROGRESS, onProgress);
            preloader.addEventListener(FlexEvent.INIT_COMPLETE, intComplete);
        }

        protected function intComplete(event:FlexEvent):void {
            dispatchEvent(new Event(Event.COMPLETE));
        }

        protected function onProgress(event:ProgressEvent):void {
            customPreloader.percent.text = Math.ceil(event.bytesLoaded / event.bytesTotal * 100).toString() + '%';
            customPreloader.gotoAndStop(Math.ceil(event.bytesLoaded / event.bytesTotal * 100));
        }
    }
}

The last thing to do is quickly add the class that we made to our main application tag (see the sketch at the end of this post).

If you're looking for inspiration for preloaders, you need to check out Big Spaceship's new site called PrettyLoaded (). And if you're looking for a nice video tutorial covering some of the stuff in this blog post, be sure to check out Lee Brimelow's website, where he shows how to build a custom Flex preloader in this episode.
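For reference, wiring the preloader into the main application tag might look something like this minimal sketch. It assumes a Flex 3 style mx:Application root and reuses the package path from the class above; adjust both to match your own project:

<?xml version="1.0" encoding="utf-8"?>
<!-- Sketch only: point the preloader attribute at the fully qualified
     class name of your custom preloader. -->
<mx:Application xmlns:mx="http://www.adobe.com/2006/mxml"
                preloader="com.alagad.view.preloader.RedPreloader">
    <!-- application content -->
</mx:Application>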
https://doughughes.net/2009/11/12/custom-preloaders-for-flex/
CC-MAIN-2018-17
en
refinedweb