_id | partition | text | language | title
---|---|---|---|---|
d13201 | train | Maybe you need a ConcurrentDictionary of BlockingCollection:
ConcurrentDictionary<int, BlockingCollection<string>> mailBoxes = new ConcurrentDictionary<int, BlockingCollection<string>>();
int maxBoxes = 5;
CancellationTokenSource cancelationTokenSource = new CancellationTokenSource();
CancellationToken cancelationToken = cancelationTokenSource.Token;
Random rnd = new Random();
// Producer
Task.Factory.StartNew(() =>
{
while (true)
{
int index = rnd.Next(0, maxBoxes);
// put the letter in the mailbox 'index'
var box = mailBoxes.GetOrAdd(index, new BlockingCollection<string>());
box.Add("some message " + index, cancelationToken);
Console.WriteLine("Produced a letter to put in box " + index);
// Wait simulating a heavy production item.
Thread.Sleep(1000);
}
});
// Consumer 1
Task.Factory.StartNew(() =>
{
while (true)
{
int index = rnd.Next(0, maxBoxes);
// get the letter in the mailbox 'index'
var box = mailBoxes.GetOrAdd(index, new BlockingCollection<string>());
var message = box.Take(cancelationToken);
Console.WriteLine("Consumed 1: " + message);
// consume a item cost less than produce it:
Thread.Sleep(50);
}
});
// Consumer 2
Task.Factory.StartNew(() =>
{
while (true)
{
int index = rnd.Next(0, maxBoxes);
// get the letter in the mailbox 'index'
var box = mailBoxes.GetOrAdd(index, new BlockingCollection<string>());
var message = box.Take(cancelationToken);
Console.WriteLine("Consumed 2: " + message);
// consume a item cost less than produce it:
Thread.Sleep(50);
}
});
Console.ReadLine();
cancelationTokenSource.Cancel();
This way, a consumer that is expecting something in mailbox 5 will wait until the producer puts a letter in mailbox 5.
A: You'll need to write your own adapter class - something like:
public class ConcurrentDictionaryWrapper<TKey,TValue>
: IProducerConsumerCollection<KeyValuePair<TKey,TValue>>
{
private ConcurrentDictionary<TKey, TValue> dictionary;
public IEnumerator<KeyValuePair<TKey, TValue>> GetEnumerator()
{
return dictionary.GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return GetEnumerator();
}
public void CopyTo(Array array, int index)
{
throw new NotImplementedException();
}
public int Count
{
get { return dictionary.Count; }
}
public object SyncRoot
{
get { return this; }
}
public bool IsSynchronized
{
get { return true; }
}
public void CopyTo(KeyValuePair<TKey, TValue>[] array, int index)
{
throw new NotImplementedException();
}
public bool TryAdd(KeyValuePair<TKey, TValue> item)
{
return dictionary.TryAdd(item.Key, item.Value);
}
public bool TryTake(out KeyValuePair<TKey, TValue> item)
{
item = dictionary.FirstOrDefault();
TValue value;
return dictionary.TryRemove(item.Key, out value);
}
public KeyValuePair<TKey, TValue>[] ToArray()
{
throw new NotImplementedException();
}
}
A: Here is an implementation of a IProducerConsumerCollection<T> collection which is backed by a ConcurrentDictionary<TKey, TValue>. The T of the collection is of type KeyValuePair<TKey, TValue>. It is very similar to Nick Jones's implementation, with some improvements:
public class ConcurrentDictionaryProducerConsumer<TKey, TValue>
: IProducerConsumerCollection<KeyValuePair<TKey, TValue>>
{
private readonly ConcurrentDictionary<TKey, TValue> _dictionary;
private readonly ThreadLocal<IEnumerator<KeyValuePair<TKey, TValue>>> _enumerator;
public ConcurrentDictionaryProducerConsumer(
IEqualityComparer<TKey> comparer = default)
{
_dictionary = new(comparer);
_enumerator = new(() => _dictionary.GetEnumerator());
}
public bool TryAdd(KeyValuePair<TKey, TValue> entry)
{
if (!_dictionary.TryAdd(entry.Key, entry.Value))
throw new DuplicateKeyException();
return true;
}
public bool TryTake(out KeyValuePair<TKey, TValue> entry)
{
// Get a cached enumerator that is used only by the current thread.
IEnumerator<KeyValuePair<TKey, TValue>> enumerator = _enumerator.Value;
while (true)
{
enumerator.Reset();
if (!enumerator.MoveNext())
throw new InvalidOperationException();
entry = enumerator.Current;
if (!_dictionary.TryRemove(entry)) continue;
return true;
}
}
public int Count => _dictionary.Count;
public bool IsSynchronized => false;
public object SyncRoot => throw new NotSupportedException();
public KeyValuePair<TKey, TValue>[] ToArray() => _dictionary.ToArray();
public IEnumerator<KeyValuePair<TKey, TValue>> GetEnumerator()
=> _dictionary.GetEnumerator();
IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
public void CopyTo(KeyValuePair<TKey, TValue>[] array, int index)
=> throw new NotSupportedException();
public void CopyTo(Array array, int index) => throw new NotSupportedException();
}
public class DuplicateKeyException : InvalidOperationException { }
Usage example:
BlockingCollection<KeyValuePair<string, Item>> collection
= new(new ConcurrentDictionaryProducerConsumer<string, Item>());
//...
try { collection.Add(KeyValuePair.Create(key, item)); }
catch (DuplicateKeyException) { Console.WriteLine($"The {key} was rejected."); }
The collection.TryTake method removes a practically random key from the ConcurrentDictionary, which is unlikely to be desirable behavior. Also the performance is not great, and the memory allocations are significant. For these reasons I don't enthusiastically recommend using the above implementation. I would suggest instead taking a look at the ConcurrentQueueNoDuplicates<T> that I have posted here, which has proper queue behavior.
Caution: Calling collection.TryAdd(item); does not have the expected behavior of returning false if the key exists. Any attempt to add a duplicate key invariably results in a DuplicateKeyException. For an explanation look at the aforementioned other post. | unknown | |
d13202 | train | You can use the --quiet option:
find /RAID/s.korea/onlyzip/ -name "*.zip" -type f -exec zcat -q {} \; |wc -c
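If you also want a per-file breakdown rather than just the grand total, a variant along these lines should work (same path and pattern assumed; an untested sketch):
find /RAID/s.korea/onlyzip/ -name "*.zip" -type f \
  -exec sh -c 'printf "%s: " "$1"; zcat -q "$1" | wc -c' _ {} \;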
Beware of the double quotes around the pattern: they keep the shell from expanding *.zip before find sees it. | unknown | |
d13203 | train | Define the type parameter of FutureBuilder:
// vvvvvvvv
FutureBuilder<Wetter>(
future: wetter,
builder: (context, snapshot) {
if (snapshot.hasData) {
grad = snapshot.data!.wetter;
return Text('$grad'); // snapshot.data is now typed as Wetter
}
return const CircularProgressIndicator(); // placeholder while waiting for data
},
); | unknown | |
d13204 | train | Set your loop indices to
for ( i = 1; i < len-1; i++ )
and treat the first and last elements as special cases. They can be executed outside of the OpenMP regions.
A: There is an implicit barrier at the end of a parallel section. A way to improve the code would be to enclose all the function into a #pragma omp parallel directive, so that threads are spawned only once at the beginning rather than twice at sections 1 and 3.
The implicit barrier will still be there at the end of the omp for loops, but this is still less overhead than spawning new threads. Section 2 would then have to be enclosed in an omp single block (this may well be what you have done as you mention that omp single did not work better, but it is not 100% clear).
#include <math.h> /* for pow() and sqrt() used below */
void Funct(double *vec, int len)
{
// Create threads
#pragma omp parallel
{
//Section 1
#pragma omp for
for (int i = 0; i < len; i++ ){
//Code that initialize vec, it simulates an initialization in the original code
vec [ i ] = i;
} // Implicit barrier here (end of omp for loop)
//Section 2
//This code must be run sequentially
// It will start only once the section 1 has been completed
#pragma omp single
{
double tmp;
tmp = vec [ 0 ];
vec [0 ] = vec [ len - 1 ];
vec [ len - 1 ] = tmp;
tmp = vec [ 0 ];
vec [0 ] = vec [ len - 1 ];
vec [ len - 1 ] = tmp;
} // Implicit barrier here (end of omp single block)
//End of the sequential code
//Section 3
#pragma omp for
for ( int i = 0; i < len; i++ ) //Code to simulate workload on vec
{
vec [ i ] = pow(vec[i], 2 );
vec [ i ] = sqrt ( vec [ i ] );
vec [ i ] += 1;
vec [ i ] = pow(vec[i], 2 );
vec [ i ] = sqrt ( vec [ i ] );
vec [ i ] -= 1;
} // Implicit barrier here end of for
} // Implicit barrier here end of parallel + destroy threads
}
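Going one step further, the parallel region can even be hoisted out of the function entirely, as the note below suggests. A minimal sketch, where Funct_body is a hypothetical refactor of Funct that keeps only the orphaned omp for / omp single blocks and has no #pragma omp parallel of its own:
#include <omp.h>
void Funct_body(double *vec, int len); /* hypothetical refactor of Funct */
int main(void)
{
    double vec[1000];
    /* the thread team is created once here; the orphaned worksharing
       constructs inside Funct_body bind to this team */
    #pragma omp parallel
    Funct_body(vec, 1000);
    return 0;
}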
The best would be to move the omp parallel directive to the main function, so that threads are spawned only once. | unknown | |
d13205 | train | Please note: I figured out my problem. It was the capitalisation of a letter: I needed to change a lowercase "f" to a capital "F". | unknown | |
d13206 | train | I would suggest using Mono for this project, since you're already in Windows Forms. You'll have difficulty with the database requirement using Silverlight, and WPF will not work at all.
A: Agreed on the Mono front. I would definitely check out Mono Develop - it's the cross platform Mono IDE. It's been released for Mac so that may help you port your app (or even develop it!)
A: I would suggest looking into Silverlight 4.0. Beta version is already available. I expect to see release at Mix 2010 (which is March 15-17th).
From what you requested it already supports:
*HttpWebRequest / HTTP calls
*Windows / Dialogs (kind of)
*System Tray Presence
As for access to the underlying Sqlite database (via ADO.NET), this can be achieved through RIA Services.
Silverlight supports out of browser mode, and runs both on Window and Mac. IDE is VS 2010, and (probably) Eclipse. | unknown | |
d13207 | train | You need the term id and the level id in `detach_term`.
Route
Route::post('term/{id}/deleteLevel', 'ListController@detach_term');
Controller Action
public function detach_term(Request $request, $id)
{
$term = Term::with('level')->find($id);
if($term){
$postData = $request->all();
$term->level()->detach($postData['level_id']);
return redirect('term/get/' . $term->id);
}else{
abort(400, 'Invalid term');
}
}
Html Form in (term/get/2/edit) (Be sure you have $term sent from controller to view)
<form action="/term/{{$term->id}}/deleteLevel" method="POST" >
{{ csrf_field() }}
@foreach($term->level as $levels)
<input type="checkbox" name="level_id[]" value="{{ $levels->id }}">
{{ $levels->levelname }}
@endforeach
<button type="submit">Delete</button>
</form>
Check Attaching / Detaching of https://laravel.com/docs/5.6/eloquent-relationships#many-to-many | unknown | |
d13208 | train | The problem is the use of an older pandas version, because if you check DataFrame.droplevel:
New in version 0.24.0.
The solution is to use MultiIndex.droplevel:
toy.columns = toy.columns.droplevel(level = 1) | unknown | |
d13209 | train | I don't know what your variable name is, but you could do something like this:
$option_value = $array[0]->option_value;
Since it's an object inside, use the -> (arrow operator) to access the desired property. | unknown | |
d13210 | train | Add the plus button next to each input and give them a class of addRow (for example).
Map a function to .addRow's click event:
$(".addRow").click(function(){
$(this)
.prev("input") // select the previous input element
.clone() // clone said element
.appendTo("#formID"); // append the clone to the form
});
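Since clone() also copies whatever the user already typed (more on this below), you may want a variant that clears the cloned value before appending (same selectors assumed):
$(".addRow").click(function(){
    $(this)
        .prev("input")   // select the previous input element
        .clone()         // clone said element
        .val("")         // clear the cloned value
        .appendTo("#formID");
});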
That's because clone() copies the existing values of the inputs too; clearing the clone's value with val('') before appending avoids carrying stale text into the new row. | unknown | |
d13211 | train | To close the loop here, jamiet@ confirmed in the comments that the root cause is that BigQuery does not support exporting from views; it supports exporting only from tables. | unknown | |
d13212 | train | To run any script file, the system needs a shell, but systemd does not provide one by itself, so you need to specify the shell that should run the script.
So use ExecStart=/bin/sh /usr/lib/systemd/init_script in your service unit:
[Unit]
Description=Loading module --module_name module
[Service]
Type=oneshot
ExecStart=/bin/sh /usr/lib/systemd/init_script
[Install]
WantedBy=multi-user.target
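Alternatively, assuming init_script itself begins with a shebang line such as #!/bin/sh and carries the execute bit, systemd can launch it directly without naming the shell:
ExecStart=/usr/lib/systemd/init_script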
And give the script execute permission:
chmod 777 /usr/lib/systemd/init_script
before running your script. | unknown | |
d13213 | train | "Invalid next size" means that glibc has detected corruption in your memory arena.
You have overwritten valuable accounting information that's stored in between your allocated blocks.
With each block that malloc gives you, there is some accounting information stored close by. When you overwrite this information by, for example, writing 128 characters to a 20-character buffer, glibc may detect this the next time you try to free (or possibly allocate) some memory.
You need to find the root cause of this problem - it's not the free itself, that's just where the problem is being detected. Somewhere, some of your code is trashing memory and a memory analysis tool like valgrind will be invaluable here.
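A typical invocation looks like this (the program name is a placeholder):
valgrind --tool=memcheck --leak-check=full ./your_program
It will point at the statement performing the out-of-bounds write, not just the free where glibc aborts.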
A: If the node is not found in the list, you will free the Tail node at the end of the function, without updating Tail to point to anything valid again.
Further using the list and the now deallocated Tail can easily result in memory corruption that might later be detected by glibc with a message like the one you got.
Also note that in (here->number==Number) you are comparing two pointers, not the values those pointers point to. I'm not sure if that's what you want. | unknown | |
d13214 | train | Each build in VSO uses a new VM that is spun up just for your build. Short of hosting your own build server connected to your VSO account, I don't think it can be avoided.
Unless there are ways to speed up the process of downloading the code from a git repo, I think you're stuck. | unknown | |
d13215 | train | See redirect:
import { redirect } from "react-router-dom";
const loader = async () => {
const user = await getUser();
if (!user) {
return redirect("/login");
}
};
(from the docs) | unknown | |
d13216 | train | I had this issue recently and the cause was that the stack I was using on the dash required specific, older versions of packages that were dependencies of ones in my requirements.
A good first step is to try installing the older version of websocket-client. If slackclient 1.3.2 works on the older version, you're good; otherwise you might need to try rolling back slackclient too. | unknown | |
d13217 | train | This might be a bug:
The publishedLink from a published Revision (for Sheets, Docs and Slides, at least) is not populated. This seems to be the case for both V2 and V3.
This behaviour was reported some time ago in Issue Tracker:
*Visible to Public: After publishing, the publishedLink is undefined
I'd suggest you star the issue in order to give it more visibility.
In this case, I think a workaround could be to work with one of the Revision exportLinks instead. | unknown | |
d13218 | train | If you want to delete text other than 14 digits followed with a space, use (\b\d{14} )|. and replace with $1.
The pattern matches and captures (we can refer to the text captured with a backreference in the replacement pattern) the 14-digit chunks and then a space as whole word due to \b (a word boundary). If this text is not found, any character other than a newline is matched with . and is not captured (we cannot refer to it with a backreference).
Thus, when we replace with a backreference $1, we just restore the matched 14 digit chunk with a space.
See the regex demo at regex101.com.
To get the cleaner view, remove all empty lines: Edit > Line Operations > Remove Empty Lines.
A: You can use this negative lookahead:
^(?!.*[0-9]{14} )
*Make sure you use the start anchor ^
*It is also important to use .* before your pattern to disallow it anywhere in the input | unknown | |
d13219 | train | Whether ES6 is supported depends on your browser. It looks like you are trying to use the class, but there is no instance to access.
A build transpiled to ES5 is available per PR#8656, so React class elements are supported for extension with this library. Depending on whether your classes are transpiled to ES5 or ES6, use the code below:
// transpiled to ES5
const whyDidYouRender = require('@welldone-software/why-did-you-render');
// transpiled to ES6
const whyDidYouRender = require('@welldone-software/why-did-you-render/dist/no-classes-transpile/umd/whyDidYouRender.min.js'); | unknown | |
d13220 | train | It compares references - i.e. whether both variables are referring to the exact same object (rather than just equal ones).
*s and s2 refer to different objects, so the expression evaluates to false.
*s and s1 refer to the same object (as each other) because of the assignment.
*s2 and s3 refer to the same object (as each other) because of string interning.
If that doesn't help much, please ask for more details on a particular bit. Objects and references can be confusing to start with.
Note that only string literals are interned by default... so even though s and s2 refer to equal strings, they're still two separate objects. Similarly if you write:
String x = new String("foo");
String y = new String("foo");
then x == y will evaluate to false. You can force interning, which in this case would actually return the interned literal:
String x = new String("foo");
String y = new String("foo");
String z = "foo";
// Expressions and their values:
x == y: false
x == z: false
x.intern() == y.intern(): true
x.intern() == z: true
EDIT: A comment suggested that new String(String) is basically pointless. This isn't the case, in fact.
A String refers to a char[], with an offset and a length. If you take a substring, it will create a new String referring to the same char[], just with a different offset and length. If you need to keep a small substring of a long string for a long time, but the long string itself isn't needed, then it's useful to use the new String(String) constructor to create a copy of just the piece you need, allowing the larger char[] to be garbage collected.
An example of this is reading a dictionary file - lots of short words, one per line. If you use BufferedReader.readLine(), the allocated char array will be at least 80 chars (in the standard JDK, anyway). That means that even a short word like "and" takes a char array of 160 bytes + overheads... you can run out of space pretty quickly that way. Using new String(reader.readLine()) can save the day.
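A minimal sketch of that dictionary scenario (file name hypothetical; this only pays off on JDKs where strings share a larger backing array, as described above):
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class DictionaryLoader {
    public static List<String> load() throws IOException {
        List<String> words = new ArrayList<>();
        try (BufferedReader reader = new BufferedReader(new FileReader("words.txt"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                // copy just the needed characters so the reader's large
                // internal buffer can be garbage collected
                words.add(new String(line));
            }
        }
        return words;
    }
}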
A: == compares objects, not the content of an object. s and s2 are different objects. If you want to compare the content, use s.equals(s2).
A: I suppose you know that when you test equality between variables using '==', you are in fact testing if the references in memory are the same. This is different from the equals() method that combines an algorithm and attributes to return a result stating that two Objects are considered as being the same. In this case, if the result is true, it normally means that both references are pointing to the same Object. This leaves me wondering why s2==s3 returns true and whether String instances (which are immutable) are pooled for reuse somewhere.
A: Think of it like this.
Identical twins look the same but they are made up differently.
If you want to know if they "look" the same use the compare.
If you want to know they are a clone of each other use the "=="
:)
A: == compares the memory (reference) location of the Objects. You should use .equals() to compare the contents of the object.
You can use == for ints and doubles because they are primitive data types
A: It should be an obvious false. The JVM reuses string literals that already exist in memory, hence s2 and s3 point to the same String that has been instantiated once. If you do something like s5="Hai", even that will be equal to s3.
However, new creates a new object, irrespective of whether the String already exists or not. Hence s does not equal s3 or s4.
Now if you do s6= new String("Hai"), even that will not be equal to s2,s3 or s.
A: The literals s2 and s3 will point to the same string in memory as they are present at compile time. s is created at runtime and will point to a different instance of "Hai" in memory. If you want s to point to the same instance of "Hai" as s2 and s3, you can ask Java to do that for you by calling intern. So s.intern() == s2 will be true.
Good article here.
A: You are using some '==' overload for String class... | unknown | |
d13221 | train | Try adding some headers that your browser would send, too. Also a cookie might be required - you could fetch one with a "normal" browser.
Example (curl):
curl "http://poalimparents.bankhapoalim.co.il/" -H "Host: poalimparents.bankhapoalim.co.il" -H "User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:44.0) Gecko/20100101 Firefox/44.0" -H "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" -H "Accept-Language: de,en-US;q=0.7,en;q=0.3" --compressed -H "DNT: 1" -H "Cookie: rbzreqid=bnhp-rbzr0231343537323737353630894b901088a07d30; rbzid=YnVIYjc0K2RmTDFXZFdiZy95UTNrOUJ0NEp0MzgzbW9oNlhHcFdVMU1mR25oT1NGQXowdGdZbjR3WktuQ2ZBZ0ljUTVCbHFDK213bGZXRk1DekJMMnhQMVFZVG4zcHpNT1lEWTRpM3FVeiswbkxtaFVCR25CU0taQ2pnNU1IQ1Z5WDc2ZWgxa2ZxR25vR1JadFpTVThidWs0d0s5QUF2YUVRSG1QcUpsS1ltRGpYNzhPR0lpRDNkak1VRmVxdm5nY0RpM1dEUnYrWU1rR2R1c3pWY2JGZHd6ODlOdkxHUkxuOW03N0VzWC9oOD1AQEAwQEBALTc0MDc0MDczNDA-" -H "Connection: keep-alive" -H "If-Modified-Since: Mon, 22 Feb 2016 11:40:18 GMT" -H "If-None-Match: W/""af3920fb581df1e4de68d46a4694689a""" -H "Cache-Control: max-age=0" | unknown | |
d13222 | train | If you specify the return type, it silences this error, e.g.:
let r3 = reduce(array2D, []){result, x -> [Int] in
return result + x}
If you specify that the initial empty array is an Int array, the error is silenced:
let r4 = reduce(array2D, [Int]()){result, x in
return result + x} | unknown | |
d13223 | train | We got around our problem by switching lightboxes: rather than ColorBox, use SimpleModal. I have a funny feeling it will work for you. Good luck!
A: <script>
$(document).ready(function() {
initCKEditor(); //function ckeditor.
$("#id_textarea").val(CKEDITOR.instances.id_textarea.getData());
});
</script>
A: Render/create ckeditor on colorbox "onComplete" callback.
See callbacks here : http://www.jacklmoore.com/colorbox
A: After some debugging I found that it is because of a CSS rule.
In the skin named kama you need to change the following CSS rule in mainui.css from:
.cke_skin_kama .cke_browser_ie.cke_browser_quirks .cke_contents iframe
{
position: absolute;
top: 0;
}
To:
.cke_skin_kama .cke_browser_ie.cke_browser_quirks .cke_contents iframe
{
top: 0;
}
Depending on your setup, you might need to change it in skins/kama/editor.css
I however recommend upgrading to a newer version of ckeditor. I found this issue on version 3.6.2. | unknown | |
d13224 | train | It seems like Xcode forgets to generate the scnassets when building on the command line (xcodebuild). We fixed it with the following build rule:
Script:
${DEVELOPER_TOOLS_DIR}/../usr/bin/copySceneKitAssets "${INPUT_FILE_PATH}" -o "${DERIVED_FILE_DIR}/${INPUT_FILE_NAME}"
Output Files:
$(DERIVED_FILE_DIR)/${INPUT_FILE_NAME} | unknown | |
d13225 | train | Use the internal storage of the application.
Parse the image to a byteArray and the text to a single String so you can easily convert this also to a byteArray.
Name the files so you can easily retrieve them and link them back together.
A: You can store the last updated data locally, with SharedPreferences or a JSON file.
Instead of showing a toast, you can load the last updated data from local storage.
A: Picasso is a good choice for images: it saves images locally and reuses them automatically later.
For your items, I suggest using a small local database: on Android, we use SQLiteDatabase. Here is a small tutorial: http://www.androidhive.info/2011/11/android-sqlite-database-tutorial/
So, I suggest this pattern:
*user opens activity
*system retrieves data from database
*in parallel, system starts to download the new items.
*When new items have been downloaded, you should notify the user like the 9gag or Facebook apps do. | unknown | |
d13226 | train | Yes it is. The system browser is Nautilus, a new, more advanced alternative to the old class browser, created by Benjamin Van Ryseghem.
You can still open the old one in Pharo 2.0 by executing Browser open, but I would highly recommend using Nautilus, which is the default in Pharo 2.0.
A: You should not use Pharo 2.0 to follow the Pharo by Example book; the book has its own image, which is an older version of Pharo.
You can download the right image from here: http://pharobyexample.org/image/PBE-OneClick-1.1.app.zip
After finishing learning with it, you can then use 2.0 safely :) | unknown | |
d13227 | train | So now it's working
Sub Prideti_produkta()
LastRow = ActiveSheet.Cells(Rows.Count, "D").End(xlUp).Row
Range("D" & LastRow + 1).EntireRow.Insert
Range("H" & LastRow + 1).FillDown
Range("K" & LastRow + 1).FillDown
Range("M" & LastRow + 1).FillDown
Range("N" & LastRow + 1).FillDown
With Range("D" & Rows.Count).End(xlUp).Offset(1)
.Value = .Offset(-1).Value + 1
.Offset(, -1).Interior.ColorIndex = 0
.Offset(, -2).Interior.ColorIndex = 0
.Offset(, -3).Interior.ColorIndex = 0
End With
End Sub | unknown | |
d13228 | train | If you omit the @Valid, Spring will omit the validation on the web layer, which basically means your controller will not trigger validation. Validation is done by the RequestMappingHandlerAdapter when a @Valid (or @Validated) annotation is found on an @ModelAttribute or @RequestBody annotated method argument.
JPA will also use a configured validator and trigger validation as well, so a validation exception will be thrown upon writing into the database. You can disable this with spring.jpa.properties.javax.persistence.validation.mode=none in your application.properties (or the YAML equivalent). When disabling both, no validation will be done at all; in that case your only hope is a database constraint saying the column isn't allowed to be null.
So validation is still done but the location where the validation is done is different. Due to the difference you will also get a different exception.
You have to wonder: do you really want to do this upon persisting the entity, at the risk of having already executed some complex/time-consuming business logic, or quickly upon submission of the form?
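For completeness, a minimal sketch of triggering validation at form submission in the controller (ThingDto is a hypothetical bean carrying constraint annotations such as @NotNull):
import javax.validation.Valid;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
@RestController
public class ThingController {
    @PostMapping("/things")
    public ResponseEntity<Void> create(@Valid @RequestBody ThingDto dto) {
        // reached only if dto passed validation; otherwise Spring answers 400
        return ResponseEntity.ok().build();
    }
}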
A: So that the object can be validated in the Controller layer you need @Valid (or @Validated) to trigger its validation.
You can read more about this on the following online resources:
*https://www.baeldung.com/spring-boot-bean-validation
*https://www.baeldung.com/spring-valid-vs-validated
On which you can read:
Of course, the most relevant part is the use of the @Valid annotation.
When Spring Boot finds an argument annotated with @Valid, it
automatically bootstraps the default JSR 380 implementation —
Hibernate Validator — and validates the argument. | unknown | |
d13229 | train | You may restore the .abf files into an Azure Analysis Services instance by uploading the file into the associated blob storage of the AS instance:
https://azure.microsoft.com/en-us/blog/backup-and-restore-your-azure-analysis-services-models/
https://www.neowin.net/news/azure-analysis-services-update-brings-backup-and-restore-new-pricing-options-and-more | unknown | |
d13230 | train | Typescript comment directives are not very JSX friendly, sadly. But you can definitely make it work.
Give it one child, in braces, { }, with the comment in those braces.
<SomeComponentFromLibrary>
{/* @ts-expect-error */
<MyComponent />
}
</SomeComponentFromLibrary>
Playground
Also, side note: The typings of SomeComponentFromLibrary are probably just wrong. In nearly all cases I've ever seen, children should be typed as React.ReactNode. And when you type this function as such, it now works exactly like you expect.
So I'd probably create a pull request for whatever this library is to fix that.
A: Looks like this is a bug: https://github.com/microsoft/TypeScript/issues/41125
It's fixed here: https://github.com/microsoft/TypeScript/pull/41166 and should be released either in 4.1 or 4.2 | unknown | |
d13231 | train | Are you trying to download large files from the server? I changed the settings below in my php.ini to do so:
upload_max_filesize = 1500M
max_input_time = 1000
memory_limit = 640M
max_execution_time = 1800
post_max_size = 2000M | unknown | |
d13232 | train | The write! macro internally uses write_fmt which takes a &mut self. Your write!(&f, ...) is actually the odd one since the object being written to should be mutable, however you have an immutable File.
How does this work then? Write has an additional implementation for &Files. So you can write to a mutable reference to an immutable file. I'm not positive, but I believe this was added as a deliberate workaround to allow Files to be written to immutably.
It works in the first case because &f creates a temporary, which can be used mutably. The reason write!(f, ...) (with f being &File) doesn't work is that f is a variable that write_fmt wants to modify, so it needs to be mut.
fn write_hi(mut f: &File) {
// ^^^
write!(f, "hi").unwrap();
}
See also:
*Why is it possible to implement Read on an immutable reference to File? | unknown | |
d13233 | train | It doesn't seem like it can be done using limit (plus you would have to do some complex pagination logic to get all records, because you would need to know the total number of records, and the API does not have a method for that). See the API call list @ http://www.magentocommerce.com/api/soap/sales/salesOrder/sales_order.list.html
But what you could do as a workaround is use complex filters to limit the result set based on creation date (adjust to every hour, day or week based on order volume).
Also, since you are filtering on status (assuming that you are excluding more than just canceled orders), you may want to think about getting all orders and keeping track of the order_id/status locally (only processing the ones with the above statuses); the remainder that weren't processed would be a list of order ids that may need your attention later on.
Pseudo Code Example
$params = array(array(
'filter' => array(
array(
'key' => 'status',
'value' => array(
'key' => 'in',
'value' => $orderstatusarray,
),
),
),
'complex_filter' => array(
array(
'key' => 'created_at',
'value' => array(
'key' => 'gteq',
'value' => '2012-11-25 12:00:00'
),
),
array(
'key' => 'created_at',
'value' => array(
'key' => 'lteq',
'value' => '2012-11-26 11:59:59'
),
),
)
));
$orderListRaw = $proxy -> call ( $sessionId, 'sales_order.list', $params);
Read more about filtering @ http://www.magentocommerce.com/knowledge-base/entry/magento-for-dev-part-8-varien-data-collections | unknown | |
d13234 | train | You can use split() and slice() operations:
var MapId ='Library://London/Maps/Main-Mobile.MapDefinition';
MapId = MapId.split(/\//).slice(0,3).join('/') + '/';
console.log(MapId); | unknown | |
d13235 | train | OK, using the following config in mule-deploy.properties helped:
loader.override=-org.apache.ws.security.util.WSSecurityUtil
ref
http://www.mulesoft.org/documentation/display/MULE3USER/Classloader+Control+in+Mule
But there are still loads of issues with jars that come built into Mule Studio as plugins and then trying to deploy in standalone Mule. I will create an assorted list of such issues in another thread. | unknown | |
d13236 | train | Using absolute positioning rather than relative may do the trick. I'll test this theory and edit my answer accordingly.
Edit: using position: absolute; margin-top: -250px; seems to be the solution. | unknown | |
d13237 | train | read.csv()
You can use the read.csv() function, but there would be some warning messages (or use suppressWarnings() to wrap the read.csv() call). If you wish to avoid warning messages, use the scan() method in the next section.
library(dplyr)
read.csv("./path/to/your/file.csv", sep = ";",
col.names = c("name", "hobbies", "age", "X4")) %>%
mutate(hobbies = ifelse(is.na(X4), hobbies, paste0(hobbies, ";" ,age)),
age = ifelse(is.na(X4), age, X4)) %>%
select(-X4)
scan() file
You can first scan() the CSV file as a character vector first, then split the string with pattern ; and change it into a dataframe. After that, do some mutate() to identify your target column and remove unnecessary columns. Finally, use the first row as the column name.
library(tidyverse)
library(janitor)
semicolon_file <- scan(file = "./path/to/your/file.csv", character())
semicolon_df <- data.frame(str_split(semicolon_file, ";", simplify = T))
semicolon_df %>%
mutate(X4 = na_if(X4, ""),
X2 = ifelse(is.na(X4), X2, paste0(X2, ";" ,X3)),
X3 = ifelse(is.na(X4), X3, X4)) %>%
select(-X4) %>%
janitor::row_to_names(row_number = 1)
Output
name hobbies age
2 Jon cooking 38
3 Bill karate;jogging 41
4 Maria fishing 32
A: Assuming that you have the columns name and age with a single entry per observation, and hobbies with possibly multiple entries, the following approach works:
*read in the file line by line instead of treating it as a table:
tmp <- readLines(con <- file("table.csv"))
close(con)
*Find the position of the separator in every row. The entry before the first separator is the name the entry after the last is the age:
separator_pos <- gregexpr(";", tmp)
name <- character(length(tmp) - 1)
age <- integer(length(tmp) - 1)
hobbies <- vector(length=length(tmp) - 1, "list")
*fill the three elements using a for loop:
# the first line are the colnames
for(line in 2:length(tmp)){
# from the beginning of the row to the first";"
name[line-1] <- strtrim(tmp[line], separator_pos[[line]][1] -1)
# between the first ";" and the last ";".
# Every ";" is a different elemet of the list
hobbies[line-1] <- strsplit(substr(tmp[line], separator_pos[[line]][1] +1,
separator_pos[[line]][length(separator_pos[[line]])]-1),";")
#after the last ";", must be an integer
age[line-1] <- as.integer(substr(tmp[line],separator_pos[[line]][length(separator_pos[[line]])]+1,
nchar(tmp[line])))
}
*Create a separate matrix to hold the hobbies and fill it rowwise:
hobbies_matrix <- matrix(NA_character_, nrow = length(hobbies), ncol = max(lengths(hobbies)))
for(line in 1:length(hobbies))
hobbies_matrix[line,1:length(hobbies[[line]])] <- hobbies[[line]]
*Add all variables to a data.frame:
df <- data.frame(name = name, hobbies = hobbies_matrix, age = age)
> df
name hobbies.1 hobbies.2 age
1 Jon cooking <NA> 38
2 Bill karate jogging 41
3 Maria fishing <NA> 32
A: You could also do:
read.csv(text=gsub('(^[^;]+);|;([^;]+$)', '\\1,\\2', readLines('file.csv')))
name hobbies age
1 Jon cooking 38
2 Bill karate;jogging 41
3 Maria fishing 32
A: Ideally you'd ask whoever generated the file to do it properly next time :) but of course this is not always possible.
Easiest way is probably to read the lines from the file into a character vector, then clean up and make a data frame by string matching.
library(readr)
library(dplyr)
library(stringr)
# skip header, add it later
dataset <- read_lines("your_file.csv", skip = 1)
dataset_df <- data.frame(name = str_match(dataset, "^(.*?);")[, 2],
hobbies = str_match(dataset, ";(.*?);\\d")[, 2],
age = as.numeric(str_match(dataset, ";(\\d+)$")[, 2]))
Result:
name hobbies age
1 Jon cooking 38
2 Bill karate;jogging 41
3 Maria fishing 32
A: Using the file created in the Note at the end
1) read.pattern can read this by specifying the pattern as a regular expression with the portions within parentheses representing the fields.
library(gsubfn)
read.pattern("hobbies.csv", pattern = '^(.*?);(.*);(.*)$', header = TRUE)
## name hobbies age
## 1 Jon cooking 38
## 2 Bill karate;jogging 41
## 3 Maria fishing 32
2) Base R Using base R we can read in the lines, put quotes around the middle field and then read it in normally.
L <- "hobbies.csv" |>
readLines() |>
sub(pattern = ';(.*);', replacement = ';"\\1";')
read.csv2(text = L)
## name hobbies age
## 1 Jon cooking 38
## 2 Bill karate;jogging 41
## 3 Maria fishing 32
Note
Lines <- "name;hobbies;age
Jon;cooking;38
Bill;karate;jogging;41
Maria;fishing;32
"
cat(Lines, file = "hobbies.csv") | unknown | |
d13238 | train | This
cmd="ssh -q %s "command-dist --host=%s -- cmd -- 'cmd1;cmd2'"" %(host1,host2)
is understood by Python as
cmd = <some string>command-dist --host=%s -- cmd -- 'cmd1;cmd2'<some string>
because you use the same quotes inside and outside.
Try instead to escape the internal quotes with a backslash:
cmd="ssh -q %s \"command-dist --host=%s -- cmd -- 'cmd1;cmd2'\"" %(host1,host2)
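A cleaner route, expanded on just below, is to skip shell quoting entirely by handing the command to subprocess as a list; a sketch using the same host1/host2 variables:
import subprocess
remote_cmd = "command-dist --host=%s -- cmd -- 'cmd1;cmd2'" % host2
# each argv element is passed through verbatim, so no escaping is needed
proc = subprocess.Popen(["ssh", "-q", host1, remote_cmd])
proc.wait()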
Aside from that, as the sketch shows, you should use the subprocess module and supply your command as a list to subprocess.Popen. | unknown | |
d13239 | train | Use whitespace: <match tag1 tag2 tagN>
From official docs
When multiple patterns are listed inside a single tag (delimited by one or more whitespaces), it matches any of the listed patterns:
*The patterns <match a b> match a and b
*The patterns <match a.** b.*> match a, a.b, a.b.c (from the first pattern) and b.d (from the second pattern). | unknown | |
d13240 | train | In [330]: b = np.zeros((3,5),int)
To set the (3,2) columns, the row indices need to be (3,1) shape (matching by broadcasting):
In [331]: indices = np.array([[1,3],[0,1],[0,3]])
In [332]: b[np.arange(3)[:,None], indices] = 1
In [333]: b
Out[333]:
array([[0, 1, 0, 1, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 1, 0]])
np.put_along_axis does the same thing:
In [335]: b = np.zeros((3,5),int)
In [337]: np.put_along_axis(b, indices,1,axis=1)
In [338]: b
Out[338]:
array([[0, 1, 0, 1, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 1, 0]])
A: One solution is to build the indices in each dimension and then use basic indexing:
from itertools import chain
b = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
# Find the indices along the axis 0
y = np.arange(len(indices)).repeat(np.fromiter(map(len, indices), dtype=np.int_))
# Flatten the list and convert it to an array
x = np.fromiter(chain.from_iterable(indices), dtype=np.int_)
# Finaly set the items
b[y, x] = 1
It works even for indices lists with variable-sized sub-lists like indices = [[1, 3], [0, 1], [0, 2, 3]]. If your indices list always contains the same number of items in each sub-list then you can use the (more efficient) following code:
b = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
indices = np.array(indices)
n, m = indices.shape
y = np.arange(n).repeat(m)
x = indices.ravel()
b[y, x] = 1
A: Simple one-liner based on Jérôme's answer (requires all items of indices to be equal-length):
>>> b[np.arange(np.size(indices)) // len(indices[0]), np.ravel(indices)] = 1
>>> b
array([[0, 1, 0, 1, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 1, 0]]) | unknown | |
d13241 | train | I could not locate pygubudesigner within my python310\Scripts folder or anywhere else, but the cmd command mentioned in this issue worked for me: https://github.com/alejandroautalan/pygubu/issues/222
python -m pygubudesigner | unknown | |
d13242 | train | I found the solution :)
Thank you Bro @IsGreen
Source : http://forum.unity3d.com/threads/solved-raycast-multitouch-input-convert-local-recttransform-rect-to-screen-position.318115/
using UnityEngine;
using UnityEngine.UI;
using UnityEngine.EventSystems;
using System.Collections.Generic;
public class UImageRayCast : MonoBehaviour
{
Image image;
Color colorOn, colorOff;
void Start()
{
this.image = this.GetComponent<Image>();
this.colorOff = this.image.color;
this.colorOn = new Color(this.colorOff.r, this.colorOff.g, this.colorOff.b, this.colorOff.a * 0.5f);
}
void Update()
{
this.image.color = this.colorOff;
PointerEventData pointer = new PointerEventData(EventSystem.current);
List<RaycastResult> raycastResult = new List<RaycastResult>();
foreach (Touch touch in Input.touches)
{
pointer.position = touch.position;
EventSystem.current.RaycastAll(pointer, raycastResult);
foreach (RaycastResult result in raycastResult)
{
if (result.gameObject == this.gameObject)
{
if(touch.phase == TouchPhase.Began)
{
Debug.Log("Began " + result.gameObject.name );
}
else if(touch.phase == TouchPhase.Moved || touch.phase == TouchPhase.Stationary)
{
this.image.color = this.colorOn;
}
else if (touch.phase == TouchPhase.Ended)
{
Debug.Log("End " + result.gameObject.name);
}
//this.gameObject.transform.position = touch.position;
//Do Stuff
}
}
raycastResult.Clear();
}
} | unknown | |
d13243 | train | By default, FOR JSON does not include NULL values. Use the INCLUDE_NULL_VALUES option to handle them.
As an example, the tail of such a query: FOR JSON PATH, WITHOUT_ARRAY_WRAPPER, INCLUDE_NULL_VALUES) AS customer
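A fuller sketch of where those options sit (table and column names hypothetical):
SELECT c.name, c.email
FROM Customers AS c
WHERE c.id = 42
FOR JSON PATH, WITHOUT_ARRAY_WRAPPER, INCLUDE_NULL_VALUES;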
As reference:
https://learn.microsoft.com/en-us/sql/relational-databases/json/include-null-values-in-json-include-null-values-option?view=sql-server-ver15 | unknown | |
d13244 | train | For the moment, I see no way of working around this issue. The only solution is to not use Angular animations if CSP is set to prohibit inline styles: simply translate the Angular animation code to CSS and use ngClass to achieve the same effect. | unknown | |
d13245 | train | I understand that I used another type of DB, but I want to give a hint. I am using SQL Server 2019.
Firstly, you need to subtract from the date, and only then take the datepart from it.
Queries:
--dateadd -1 would subtract 1 from current month
--(Jan - 1 2022), would be December 2021
select datepart(month, dateadd(month, -1, getdate()))
--also, dateadd internally covers the problem with 30/31-day months.
--May always has 31 days, April 30. So subtracting 1 month from the 31st of May results in the 30th of April.
select dateadd(month, -1, cast('2021-05-31 10:00:00' as datetime)) | unknown | |
d13246 | train | To preserve whitespace that was passed into the script, use the $@ parameter:
/tmp/p.pl "$@" 1>/tmp/chk.out 2>&1
The quotation marks are necessary to make sure that quoted whitespace is seen by p.pl. | unknown | |
d13247 | train | That's the wrong approach. A board file is supposed to register device drivers and pass important information to them, rather than act as a device driver itself. I'm not sure if what you're trying to do is even possible.
If you really need to extract something from your I2C device at a very early stage, do that in the bootloader and pass the data to the kernel via cmdline (U-Boot, by the way, has had I2C support for quite some time). Then later, the kernel might take appropriate actions depending on what you have passed to it.
d13248 | train | After my test, I found that your problem may lie in your Js.
Try to change your code:
for (var i = 0; i < result.length; i++)
{
s += '<option value="' + result[i].Id + '">' + result[i].Tipe + '</option>';
}
To
for (var i = 0; i < result.length; i++)
{
s += '<option value="' + result[i].id + '">' + result[i].tipe + '</option>';
}
The first letter of id and tipe should be lowercase. | unknown | |
d13249 | train | A presence validation should solve the problem in case of creation and modification of Promotions.
class Promotion < ActiveRecord::Base
has_many :promotion_sweepstakes
has_many :sweepstakes,
:through => :promotion_sweepstakes
validates :sweepstakes, :presence => true
end
In order to assure consistency when there's an attempt to delete or update a Sweepstake or a PromotionSweepstake you'd have to write your own validations for those two classes. They would have to check whether previously referenced Promotions are still valid, i.e. still have some Sweepstakes.
A simple solution would take advantage of validates :sweepstakes, :presence => true in Promotion. After updating referenced PromotionSweepstakes or Sweepstakes in a transaction, you would have to call Promotion#valid? on the previously referenced Promotions. If they're not valid, you roll back the transaction, as the modification broke the consistency.
Alternatively you could use before_destroy in both PromotionSweepstake and Sweepstake in order to prevent changes violating your consistency requirements.
class PromotionSweepstake < ActiveRecord::Base
belongs_to :promotion
belongs_to :sweepstake
before_destroy :check_for_promotion_on_destroy
private
def check_for_promotion_on_destroy
raise 'deleting the last sweepstake' if promotion.sweepstakes.count == 1
end
end
class Sweepstake < ActiveRecord::Base
has_many :promotion_sweepstakes
has_many :promotions, :through => :promotion_sweepstakes
before_destroy :check_for_promotions_on_destroy
private
def check_for_promotions_on_destroy
promotions.each do |prom|
raise 'deleting the last sweepstake' if prom.sweepstakes.count == 1
end
end
end | unknown | |
d13250 | train | Assuming you know the size of the block of memory (hard coded to 100 here):
for (int i = 0; i < 100; i++)
{
char c = p[i];
if (c != 0)
printf("%c", c);
}
Minor nit but in your sample above, the string will be "asdfghi123", because the memcpy for "1234" is only copying 3 bytes.
A: Since the NUL bytes don’t mess up the way the text appears (I think) and that’s all you’re looking for, you should be able to write all of the bytes directly to stdout:
fwrite(p, sizeof(char), 100, stdout); | unknown | |
d13251 | train | The error ValueError: cannot reindex from a duplicate axis indicates in this case that you have duplicate entries in your index (and for this reason, it cannot assign to a new column, as pandas cannot know where to place the values for the duplicate entries).
To check for duplicate values in the index, you can do:
df.index.get_duplicates()
And then to get rid of the duplicate values (if you don't need to keep the original index), you can eg do df.reset_index(drop=True), or you can use ignore_index=True in append or concat. | unknown | |
d13252 | train | The answer by @chqrlie is the good and final answer, yet to complete the post, I am posting the Cython version along with the benchmarking results.
In short, the proposed solution is 2 times faster than qsort on long vectors!
# Assumed preamble, not shown in the original post:
from libc.stdint cimport uint8_t
cimport numpy
import numpy
ctypedef int (*QComparator)(void* a, void* b) nogil
cdef void qswap2(void *aptr, void *bptr, size_t size) nogil:
cdef uint8_t* ac = <uint8_t*>aptr
cdef uint8_t* bc = <uint8_t*>bptr
cdef uint8_t t
while (size > 0): t = ac[0]; ac[0] = bc[0]; bc[0] = t; ac += 1; bc += 1; size -= 1
cdef struct qselect2_stack:
uint8_t *base
uint8_t *last
cdef void qselect2(void *base, size_t nmemb, size_t size,
size_t k, QComparator compar) nogil:
cdef qselect2_stack stack[64]
cdef qselect2_stack *sp = &stack[0]
cdef uint8_t *lb
cdef uint8_t*ub
cdef uint8_t *p
cdef uint8_t *i
cdef uint8_t *j
cdef uint8_t *top
if (nmemb < 2 or size <= 0):
return
top = <uint8_t *>base
if(k < nmemb):
top += k*size
else:
top += nmemb*size
sp.base = <uint8_t *>base
sp.last = <uint8_t *>base + (nmemb - 1) * size
sp += 1
cdef size_t offset
while (sp > stack):
sp -= 1
lb = sp.base
ub = sp.last
while (lb < ub and lb < top):
#select middle element as pivot and exchange with 1st element
offset = (ub - lb) >> 1
p = lb + offset - offset % size
qswap2(lb, p, size)
#partition into two segments
i = lb + size
j = ub
while 1:
while (i < j and compar(lb, i) > 0):
i += size
while (j >= i and compar(j, lb) > 0):
j -= size
if (i >= j):
break
qswap2(i, j, size)
i += size
j -= size
# move pivot where it belongs
qswap2(lb, j, size)
# keep processing smallest segment, and stack largest
if (j - lb <= ub - j):
sp.base = j + size
sp.last = ub
sp += 1
ub = j - size
else:
sp.base = lb
sp.last = j - size
sp += 1
lb = j + size
cdef int int_comp(void* a, void* b) nogil:
cdef int ai = (<int*>a)[0]
cdef int bi = (<int*>b)[0]
return (ai > bi ) - (ai < bi)
def pyselect2(numpy.ndarray[int, ndim=1, mode="c"] na, int n):
cdef int* a = <int*>&na[0]
qselect2(a, len(na), sizeof(int), n, int_comp)
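A small usage sketch for the pyselect2 wrapper above (the dtype must match the C int the wrapper expects):
import numpy as np
a = np.random.randint(0, 1_000_000, size=100_000).astype(np.intc)
pyselect2(a, 50)
# a[:50] now holds the 50 smallest values (sorted, mirroring qsort_select below)
print(a[:50])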
Here are the benchmark results (1,000 tests):
#elements   K        qsort (s)   qselect2 (s)
1,000       50       0.1261      0.0895
1,000       100      0.1261      0.0910
10,000      50       0.8113      0.4157
10,000      100      0.8113      0.4367
10,000      1,000    0.8113      0.4746
100,000     100      7.5428      3.8259
100,000     1,000    7.5428      3.8325
100,000     10,000   7.5428      4.5727
For those who are curious, this piece of code is a jewel in the field of surface reconstruction using neural networks.
Thanks again to @chqrlie, your code is unique on The Web.
A: Here is a quick implementation for your purpose: qsort_select is a simple implementation of qsort with automatic pruning of unnecessary ranges.
Without && lb < top, it behaves like the regular qsort except for pathological cases where more advanced versions have better heuristics. This extra test prevents complete sorting of ranges that are outside the target 0 .. (k-1). The function selects the k smallest values and sorts them, the rest of the array has the remaining values in an undefinite order.
#include <stdio.h>
#include <stdint.h>
static void exchange_bytes(uint8_t *ac, uint8_t *bc, size_t size) {
while (size-- > 0) { uint8_t t = *ac; *ac++ = *bc; *bc++ = t; }
}
/* select and sort the k smallest elements from an array */
void qsort_select(void *base, size_t nmemb, size_t size,
int (*compar)(const void *a, const void *b), size_t k)
{
struct { uint8_t *base, *last; } stack[64], *sp = stack;
uint8_t *lb, *ub, *p, *i, *j, *top;
if (nmemb < 2 || size <= 0)
return;
top = (uint8_t *)base + (k < nmemb ? k : nmemb) * size;
sp->base = (uint8_t *)base;
sp->last = (uint8_t *)base + (nmemb - 1) * size;
sp++;
while (sp > stack) {
--sp;
lb = sp->base;
ub = sp->last;
while (lb < ub && lb < top) {
/* select middle element as pivot and exchange with 1st element */
size_t offset = (ub - lb) >> 1;
p = lb + offset - offset % size;
exchange_bytes(lb, p, size);
/* partition into two segments */
for (i = lb + size, j = ub;; i += size, j -= size) {
while (i < j && compar(lb, i) > 0)
i += size;
while (j >= i && compar(j, lb) > 0)
j -= size;
if (i >= j)
break;
exchange_bytes(i, j, size);
}
/* move pivot where it belongs */
exchange_bytes(lb, j, size);
/* keep processing smallest segment, and stack largest */
if (j - lb <= ub - j) {
sp->base = j + size;
sp->last = ub;
sp++;
ub = j - size;
} else {
sp->base = lb;
sp->last = j - size;
sp++;
lb = j + size;
}
}
}
}
int int_cmp(const void *a, const void *b) {
int aa = *(const int *)a;
int bb = *(const int *)b;
return (aa > bb) - (aa < bb);
}
#define ARRAY_SIZE 50000
int array[ARRAY_SIZE];
int main(void) {
int i;
for (i = 0; i < ARRAY_SIZE; i++) {
array[i] = ARRAY_SIZE - i;
}
qsort_select(array, ARRAY_SIZE, sizeof(*array), int_cmp, 50);
for (i = 0; i < 50; i++) {
printf("%d%c", array[i], i + 1 == 50 ? '\n' : ',');
}
return 0;
} | unknown | |
d13253 | train | In order to update an existing item (question), the code must first get that item by its item type, and there are many different methods for getting items by their type.
There is a different method for each type of question. The types of questions are:
*CHECKBOX
*DATE
*DATETIME
*DURATION
*GRID
*LIST
*MULTIPLE_CHOICE
*PARAGRAPH_TEXT
*SCALE
*TEXT
*TIME
In order to update an existing item, the code must first get that item by its item type. Here are some examples:
*asCheckboxItem()
*asDateItem()
*asListItem()
*Etc
For example:
var myCheckBoxItem = FormApp.openById(id).getItemById(id).asCheckboxItem();
Once the code has obtained an item as the correct item, you can change it the same way that you created it in the first place.
function editFormItem() {
var form = FormApp.getActiveForm();
var allItems = form.getItems();
var i,
L=0,
thisItem,
thisItemType,
myCheckBoxItem;
L = allItems.length;
for (i=0;i<L;i++) {
thisItem = allItems[i];
thisItemType = thisItem.getType();
//Logger.log('thisItemType: ' + thisItemType);
if (thisItemType===FormApp.ItemType.CHECKBOX) {
myCheckBoxItem = thisItem.asCheckboxItem();
myCheckBoxItem.setChoiceValues(values); // 'values' must be defined with the new choices
};
};
};
The above script is not complete. You need to somehow match up what item goes with the new changes. If all your Form questions are the same item type, then you won't need to test for what the item type is.
There are 3 item types that get returned by getItems() that are not question items. They are:
*IMAGE
*PAGE_BREAK
*SECTION_HEADER
So, if you have any of those 3 in your form, you should check the item type. | unknown | |
d13254 | train | This can be accomplished via a series of regex checks and then a loop to remove all items with less than 2 characters:
Code
import re
with open("text.txt", "r") as fi:
lowerFile = re.sub(r"[^\w ]", "", fi.read().lower())
lowerFile = re.sub(r"(^| )[^ ]*[^a-z ][^ ]*(?=$| )", "", lowerFile)
words = [word for word in lowerFile.split() if len(word) >= 2]
print(words)
Input
I li6ke to swim, dance, and Run r8un88.
Output
['to', 'swim', 'dance', 'and', 'run'] | unknown | |
d13255 | train | The easiest way to add the fancy-button component dynamically might be as follows:
1) Add component to entryComponents array of your module
@NgModule({
imports: [ BrowserModule ],
declarations: [ AppComponent, GridStackComponent, FancyButtonComponent ],
entryComponents: [ FancyButtonComponent ], // <== here
bootstrap: [ AppComponent ]
})
export class AppModule { }
2) Get root node from compiled component
constructor(private vcRef: ViewContainerRef, private componentFactoryResolver: ComponentFactoryResolver) { }
getRootNodeFromParsedComponent(component) {
const componentFactory = this.componentFactoryResolver.resolveComponentFactory(component);
const ref = this.vcRef.createComponent(componentFactory, this.vcRef.length, this.vcRef.injector);
const hostView = <EmbeddedViewRef<any>>ref.hostView;
return hostView.rootNodes[0];
}
3) Use it anywhere
const fancyButtonElement = this.getRootNodeFromParsedComponent(FancyButtonComponent);
$('#someId').append(fancyButtonElement);
Here's Plunker Example for your case
If you're looking for something more complicated, then there are a lot of answers showing how to use the Angular compiler to get it working:
*Load existing components dynamically Angular 2 Final Release
*Equivalent of $compile in Angular 2
*How can I use/create dynamic template to compile dynamic Component with Angular 2.0?
*Angular 2.1.0 create child component on the fly, dynamically
*How to manually lazy load a module?
A: You need to use Angular 2’s ViewContainerRef class, which provides a handy createComponent method. The ViewContainerRef can be informally thought of as a location in the DOM where new components can be inserted.
this.cmpRef = this.vcRef.createComponent(factory, 0, injector, []);
Here's a working plunker example.
Or you can use the generic HTML outlet from this post | unknown | |
d13256 | train | Yes, definitely. You should go for the Strategy solution.
And in my experience, there is almost never a case of too many classes, as you put it. On the contrary, the more modular your code is, the easier it is to test/maintain/deploy.
You'll often run into the opposite problem: a class you thought was small enough, with no reason to change, and then after a change in the requirements or a refactoring you see that you need to make it more modular. | unknown | |
d13257 | train | The recommended way is to use threading.Event (you can combine this with Event.wait if you want to sleep in that thread too; however, as you are waiting for a user event, you probably don't need that).
import threading
e = threading.Event()
def thread_one():
while True:
if e.is_set():
break
print("do something")
print('loop ended!')
t1=threading.Thread(target=thread_one)
t1.start()
# and in other thread:
import time
time.sleep(0.0001) # just to show thread_one keeps printing
# do something for little bit and then it break
e.set()
EDIT: To interrupt the thread while it's waiting for user input you can send SIGINT to that thread and and it will raise KeyboardInterrupt which you can then handle. Unfortunate limitation of python, including python3, is that signals to all threads are handled in the main thread so you need to wait for the user input in the main thread:
import threading
import sys
import os
import signal
import time
def thread_one():
time.sleep(10)
os.kill(os.getpid(), signal.SIGINT)
t1=threading.Thread(target=thread_one)
t1.start()
while True:
try:
print("waiting: ")
sys.stdin.readline()
except KeyboardInterrupt:
break
print("loop ended") | unknown | |
d13258 | train | I strongly disagree with @ttulka's answer, so I have decided to add my own as well.
Given you received an event in your Lambda function, it's very likely you'll process the event and then invoke some other service. It could be a call to S3, DynamoDB, SQS, SNS, Kinesis...you name it. What is there to be asserted at this point?
Correct arguments!
Consider the following event:
{
"data": "some-data",
"user": "some-user",
"additionalInfo": "additionalInfo"
}
Now imagine you want to invoke documentClient.put and you want to make sure that the arguments you're passing are correct. Let's also say that you DON'T want the additionalInfo attribute to be persisted, so, somewhere in your code, you'd have this to get rid of this attribute
delete event.additionalInfo
right?
You can now create a unit test to assert that the correct arguments were passed into documentClient.put, meaning the final object should look like this:
{
"data": "some-data",
"user": "some-user"
}
Your test must assert that documentClient.put was invoked with a JSON which deep equals the JSON above.
If you or any other developer now, for some reason, removes the delete event.additionalInfo line, tests will start failing.
And this is very powerful! If you make sure that your code works the way you expect, you basically don't have to worry about creating integration tests at all.
Now, if a SQS consumer Lambda expects the body of the message to contain some field, the producer Lambda should always take care of it to make sure the right arguments are being persisted in the Queue. I think by now you get the idea, right?
I always tell my colleagues that if we can create proper unit tests, we should be good to go in 95% of the cases, leaving integration tests out. Of course it's better to have both, but given the amount of time spent on creating integration tests like setting up environments, credentials, sometimes even different accounts, is not worth it. But that's just MY opinion. Both you and @ttulka are more than welcome to disagree.
Now, back to your question:
You can use Sinon to mock and assert arguments in your Lambda functions. If you need to mock a 3rd-party service (like DynamoDB, SQS, etc), you can create a mock object and replace it in your file under test using Rewire. This usually is the road I ride and it has been great so far.
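A minimal sketch of that setup (the handler module, its process export, and the exact shape passed to put are assumptions for illustration, not details from the question):
const sinon = require('sinon');
const rewire = require('rewire');
// hypothetical module under test that calls documentClient.put(...)
const handler = rewire('./handler');
it('persists the event without additionalInfo', async () => {
  const putStub = sinon.stub().returns({ promise: () => Promise.resolve() });
  // swap the module-level documentClient for our stub
  handler.__set__('documentClient', { put: putStub });
  await handler.process({ data: 'some-data', user: 'some-user', additionalInfo: 'additionalInfo' });
  // sinon deep-compares object arguments, so this fails if additionalInfo was not deleted
  sinon.assert.calledWith(putStub, { data: 'some-data', user: 'some-user' });
});
If someone later removes the delete event.additionalInfo line, the assertion above starts failing, which is exactly the safety net described.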
A: Try setting aws-sdk module explicitly.
Project structures that don't include the aws-sdk at the top level node_modules project folder will not be properly mocked. An example of this would be installing the aws-sdk in a nested project directory. You can get around this by explicitly setting the path to a nested aws-sdk module using setSDK().
const AWSMock = require('aws-sdk-mock');
const AWS = require('aws-sdk');
AWSMock.setSDKInstance(AWS);
For more details on this, read the aws-sdk-mock documentation; they have explained it even better.
A: I see unit testing as a way to check if your domain (business) rules are met.
As long as your Lambda contains exclusively the integration of AWS services, it doesn't make much sense to write a unit test for it.
Mocking all the resources means your test will only be testing the communication among those mocks; such a test has no value.
External resources mean input/output, and this is what integration testing focuses on.
Write integration tests and run them as a part of your integration pipeline against real deployed resources.
A: This is how we can mock STS in Node.js.
import { STS } from 'aws-sdk';
export default class GetCredential {
constructor(public sts: STS) { }
public async getCredentials(role: string) {
// optionally log the role being assumed here
const apiRole = await this.sts
.assumeRole({
RoleArn: role,
RoleSessionName: 'test-api',
})
.promise();
if (!apiRole?.Credentials) {
throw new Error(`Credentials for ${role} could not be retrieved`);
}
return apiRole.Credentials;
}
}
Mock for the above function
import { STS } from 'aws-sdk';
import GetCredential from './GetCredential';
const sts = new STS();
let testService: GetCredential;
beforeEach(() => {
testService = new GetCredential(sts);
});
describe('Given getCredentials has been called', () => {
it('The method returns a credential', async () => {
const credential = {
AccessKeyId: 'AccessKeyId',
SecretAccessKey: 'SecretAccessKey',
SessionToken: 'SessionToken'
};
const mockGetCredentials = jest.fn().mockReturnValue({
promise: () => Promise.resolve({ Credentials: credential }),
});
testService.sts.assumeRole = mockGetCredentials;
const result = await testService.getCredentials('fakeRole');
expect(result).toEqual(credential);
});
}); | unknown | |
d13259 | train | Problem solved guys!
I changed the button in InfoPath; before, it was using a rule to submit, and now the button is a proper submit button!
Hope this can help someone! | unknown | |
d13260 | train | You could do an inner join of main_df with the full outer join of the two vendor dataframes. This way, lines from main_df are kept if and only if they appear at least once in one of the vendor dataframes. In pseudo code: main_df.inner_join(vendor1.full_join(vendor2)).
In spark:
# creating your data
vendor1_df = spark.createDataFrame([(123, 90, 45), (167, 45, 60)], ['Org_code', 'revenue', 'emp_code'])
vendor2_df = spark.createDataFrame([(456, 90, 45), (167, 450, 899)], ['Org_code', 'revenue', 'emp_code'])
main_df = spark.createDataFrame([(123, 'ABC'), (456, 'CDE'), (876, "egf"), (167, 'hnmm')], ['Org_code', 'Org_name'])
# renaming columns
df1 = vendor1_df.select('Org_code', vendor1_df['revenue'].alias('v1_revenue'), vendor1_df['emp_code'].alias('v1_emp_code'))
df2 = vendor2_df.select('Org_code', vendor2_df['revenue'].alias('v2_revenue'), vendor2_df['emp_code'].alias('v2_emp_code'))
# and the result
all_vendors = df1.join(df2, ['Org_code'], 'full')
main_df.join(all_vendors, ['Org_code']).show()
+--------+--------+----------+-----------+----------+-----------+
|Org_code|Org_name|v1_revenue|v1_emp_code|v2_revenue|v2_emp_code|
+--------+--------+----------+-----------+----------+-----------+
| 123| ABC| 90| 45| null| null|
| 456| CDE| null| null| 90| 45|
| 167| hnmm| 45| 60| 450| 899|
+--------+--------+----------+-----------+----------+-----------+
A: I like ANSI SQL. I have been writing queries since the 1990s. Therefore, let's use Spark SQL.
#
# Create dataframes
#
df1 = spark.createDataFrame( [(123, 90, 45), (167, 45, 60)], ['Org_code', 'vendor1_revenue', 'vendor1_emp_code'] )
df2 = spark.createDataFrame( [(456, 90, 45), (167, 450, 899)], ['Org_code', 'vendor2_revenue', 'vendor2_emp_code'] )
df3 = spark.createDataFrame( [(123, 'ABC'), (456, 'CDE'), (876, "egf"), (167, 'hnmm')], ['Org_code', 'Org_name'] )
Create the dataframes above and register the temporary views below.
#
# Create views
#
df1.createOrReplaceTempView("vendor1")
df2.createOrReplaceTempView("vendor2")
df2.createOrReplaceTempView("main")
Just use a left join since the main table is the driver.
%sql
select
m1.*,
v1.vendor1_revenue,
v1.vendor1_emp_code,
v2.vendor2_revenue,
v2.vendor2_emp_code
from main as m1
left join vendor1 as v1 on m1.org_code = v1.org_code
left join vendor2 as v2 on m1.org_code = v2.org_code
where v1.org_code is not null or v2.org_code is not null
We just need to add a where clause that tosses out rows in which the main table does not match any vendors.
I keep on trying to promote Spark SQL since many technologists know ANSI SQL.
The expected result is in the above image. | unknown | |
d13261 | train | Resolved. Use the following configuration in props.conf
[yourlogtype]
SEDCMD-StripHeader = s/^[^{]+//
KV_MODE = json
pulldown_type = true | unknown | |
d13262 | train | Scheduler, created by SchedulerFactoryBean, has standby() and start() methods, which you can use to control the firing of triggers.
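A minimal sketch of how you could use them (the constructor injection is an assumption; wire the Scheduler bean in however your app does it):
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
public class SchedulerControl {
    private final Scheduler scheduler; // produced by SchedulerFactoryBean
    public SchedulerControl(Scheduler scheduler) {
        this.scheduler = scheduler;
    }
    public void pauseFiring() throws SchedulerException {
        scheduler.standby(); // triggers stop firing until start() is called
    }
    public void resumeFiring() throws SchedulerException {
        scheduler.start(); // resumes firing of triggers
    }
} | unknown |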
d13263 | train | You can add the extension from the options dialog (Tools -> Options). Then go to Text Editor -> File Extension and choose the editing experience you want.
Once applied, you can use it as you did .sql. In the following image, note the highlighting and the result set on the file SQLQuery.rpc | unknown |
d13264 | train | Ok now that I see that you tried. Is this what you want?
Copied answer from here:
Python - Integer Factorization into Primes
def factorize(num):
for possible_factor in range(2, num):
if num % possible_factor == 0:
return [possible_factor] + factorize(num // possible_factor)
return [num]
nums = [8,153]
for num in nums:
print("{}: {}".format(num, factorize(num)))
Returns:
8: [2, 2, 2]
153: [3, 3, 17] | unknown | |
d13265 | train | This has nothing to do with your contacts; it has to do with the fonts when transitioning from iOS 6 to iOS 7. Delete all the fonts in your project and then carefully add them back one by one to your .plist, testing each time before adding the next, until you find the one that is causing the problem.
Have a look at this post from someone who also had the same problem as you, and at that person's answer.
Person who had the same problem as you | unknown |
d13266 | train | The problem is your quotes: you're using " both to delimit your new elements and to set their href attribute. Change your code to:
document.write("<a href='index.html'>Home</a>");
document.write("<a href='news.html'>News</a>");
document.write("<a href='about.html'>About us</a>");
Or:
document.write('<a href="index.html">Home</a>');
document.write('<a href="news.html">News</a>');
document.write('<a href="about.html">About us</a>');
Combining single (') and double (") quotes. You could also escape your internal quotes (document.write("<a href=\"index.html\">Home</a>");).
BUT it'd be better to use a single call to document.write(), like this:
document.write('<a href="index.html">Home</a>'
+ '<a href="news.html">News</a>'
+ '<a href="about.html">About us</a>');
A: You're not escaping the quotes in your strings. It should be:
document.write("<a href=\"index.html\">Home</a>");
Otherwise, JavaScript thinks the string ends after href= and the rest of the line does not follow valid JavaScript syntax.
As @Felix mentioned, the JavaScript debugger tools will be extremely helpful in letting you know what's going on. | unknown | |
d13267 | train | Ok, I figured it out. I created a directory named files and sent the output to a file in that directory using the {{ role_path }} variable. In the body portion of the email task, I used the lookup plugin to grab the contents of the file.
Here is the updated playbook with the original lines commented out:
---
- name: Check for output file
stat:
#path: /tmp/ansible_output.txt
path: "{{ role_path }}/files/ansible_output.txt"
register: stat_result
delegate_to: localhost
- name: Create file if it does not exist
file:
#path: /tmp/ansible_output.txt
path: "{{ role_path }}/files/ansible_output.txt"
state: touch
mode: '0666'
when: stat_result.stat.exists == False
delegate_to: localhost
- name: Check hard drive info
become: yes
become_user: root
shell: cat /etc/hostname; echo; df -h | egrep 'Filesystem|/dev/sd'
register: result
- debug: var=result.stdout_lines
#- local_action: lineinfile line={{ result.stdout_lines | to_nice_json }} dest=/tmp/ansible_output.txt
- local_action: lineinfile line={{ result.stdout_lines | to_nice_json }} dest="{{ role_path }}/files/ansible_output.txt"
- name: Email Result
mail:
host: some_email_host
port: some_port_number
username: my_username
password: my_password
to:
- first_email
- second_email
- third_email
from: some_email_address
subject: Ansible Disk Space Check Result
#body: "{{ result.stdout_lines | to_nice_json }}"
body: "{{ lookup('file', '{{ role_path }}/files/ansible_output.txt') }}"
#attach:
#/tmp/ansible_output.txt
attach:
"{{ role_path }}/files/ansible_output.txt"
run_once: true
delegate_to: localhost
- name: Remove Output File
file:
#path: /tmp/ansible_output.txt
path: "{{ role_path }}/files/ansible_output.txt"
state: absent
run_once: true
delegate_to: localhost
Now, my email contains the attachment, as well as the contents in the body, and I didn't have to change much in the playbook. :-) | unknown | |
d13268 | train | for close dialog box simple use by this all dialog box close
they all inherit the same class, this is the best way to select all and close by:
$(".ui-dialog-content").dialog("close");
A: Make sure that the buttons and dialogs have a common class, say btn for buttons and info for your dialogs:
$(".btn").on('click',function(e){
closeBoxes();
var index=$(this).index();
$(".info:eq("+index+")").dialog("open");
}); | unknown | |
d13269 | train | Is your app on the PYTHONPATH? You can check it in the shell by
$ python manage.py shell
and in the shell, check it using
> import sys
> print sys.path
If the app is not on PYTHONPATH, you can add it using the project's settings.py.
In settings.py:
import os
import sys
PROJECT_ROOT = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(PROJECT_ROOT, "polls"))
If that is not the issue, could you also show the urls.py and views.py part of where the template is being invoked?
A: Had the same problem, solved it, try this in the settings.py:
import sys
sys.path.insert(0, "/home/user/django_projects/myproject/")
from here: http://www.bluehostforum.com/showthread.php?21680-DJANGO-Can-t-import-own-module | unknown | |
d13270 | train | A SecureString is encrypted while at rest, which is more than just providing a way to prevent it from remaining in memory.
Is there a way to erase string completely from memory?
Yes, you need to modify the string in-situ and overwrite its contents.
You can do this using mid$() in LHS mode:
Dim i As Long
For i = 1 To Len(secret)
Mid$(secret, i, 1) = "0"
Next
Or with the ZeroMemory or CopyMemory API:
ZeroMemory ByVal StrPtr(secret), LenB(secret)
...
CopyMemory ByVal StrPtr(secret), ByVal StrPtr(String$(Len(secret), "0")), LenB(secret)
For encryption you could implement the DPAPI CryptProtectData API (which is what SecureString is based on). | unknown | |
d13271 | train | Enum.map(users, fn
%User{id: 2} = user -> %User{user | attempts: 99}
user -> user
end)
You can encapsulate it in some module and give it a nice name :)
A: Arguably, the most Elixir-like approach would be to use the Access behaviour and Kernel.put_in/3:
put_in users,
[Access.filter(&match?(%User{id: 2}, &1)),
Access.key(:attempts)],
99
#⇒ [
# %User{attempts: 5, id: 1},
# %User{attempts: 99, id: 2},
# %User{attempts: 2, id: 3}
#]
This might be easily extended to update multiple elements by changing the filter function.
A: If you are doing many updates, it's probably best to build a map, so that you don't have to iterate the list for each update.
[
%User{attempts: 5, id: 1},
%User{attempts: 12, id: 2},
%User{attempts: 2, id: 3}
]
|> Map.new(fn user -> {user.id, user} end)
|> Map.update!(2, &%User{&1 | attempts: 99})
|> Map.values()
You can do as many Map.update!/3 calls as you need, to update different ids. | unknown | |
d13272 | train | A1R5G5B5 Bitmasks:
ARRR RRGG GGGB BBBB
ALPHA: 1000 0000 0000 0000 - 0x8000
RED: 0111 1100 0000 0000 - 0x7c00
GREEN: 0000 0011 1110 0000 - 0x3e0
BLUE: 0000 0000 0001 1111 - 0x1f
Use the bitmask with the bitwise AND operator to obtain the value:
$word = /* two-byte (two octets) value per pixel */
$alpha = $word & 0x8000;
$red = $word & 0x7c00;
...
Hope this helps. A PHP function that gives you the integer value of a binary number is bindec; a function to convert an integer to a hexadecimal number is dechex. Those functions are helpful for creating a hexadecimal bitmask number from within PHP.
You can use as well a calculator to convert between binary, decimal and hexadecimal numbers, e.g. with a calculator like gcalctool.
Example code:
/**
* unpack a binary string word of a
* A1R5G5B5 color into an array of
* RGBA integer 8bit values.
*
* @param string $word
* @return array('red' => int, 'green' => int, 'blue' => int, 'alpha' => int)
*/
function wordA1R5G5B5ToArrayRGBA($word)
{
// unpack values from bit-fields
list(, $dec) = unpack('n', $word);
$blue = ($dec & 0x1F);
$green = ($dec & 0x3E0) >> 5;
$red = ($dec & 0x7C00) >> 10;
$alpha = ($dec & 0x8000) >> 15;
// map 5bit to 8bit (alpha: 1bit to 8bit)
$blue = ($blue << 3) | ($blue * 0x7 / 0x1F);
$green = ($green << 3) | ($green * 0x7 / 0x1F);
$red = ($red << 3) | ($red * 0x7 / 0x1F);
$alpha && $alpha = 0xFF;
return compact('red', 'green', 'blue', 'alpha');
} | unknown | |
d13273 | train | It should work fine; here is a fiddle that tests it out. Could you be calling it too early?
http://jsfiddle.net/mbkq12wu/1/
angular.module('userApp', ['ngResource'])
.factory('Content', function($resource) {
return $resource("http://localhost:3000/matches/:id/content");
});
describe('SO Content Test resource calls matches/1/content', function () {
it('Calls GET – api/:id/content', function() {
$httpBackend
.expectGET('http://localhost:3000/matches/1/content').respond(200);
Content.get({id: 1}, function(){});
$httpBackend.flush();
});
});
A: Your code is fine. The problem is that $scope.match.id resolves to undefined, which results in a resource URL with the :id part omitted (and the duplicated / removed as well).
To verify that the resource is all right, check
Content.get({id: 123}, function(){});
and also console.log($scope.match.id). | unknown | |
d13274 | train | Have you added your public key to cloudControl? If not, please do this:
cctrluser key.add
For full context please visit our documentation. | unknown | |
d13275 | train | the problem is about vsCode, you should run code . in cmd because if you Open the Command Palette (Ctrl + Shift + P) and type
Shell Command: Install 'code' command in PATH
you won't see anything. After running code . in cmd, you should see something like this photo, and everything will be fine.
A: I’m not sure for Windows, but usually the scripts in node_modules/.bin are symbolic links to scripts. For instance, node_modules/.bin/tsc might point to node_modules/typescript/bin/tsc.
It works outside of the directory because then it uses the global version of tsc.
Seeing your error, I’m suspecting that the symlinks are broken. Maybe just try to remove node_modules directory and redo an npm install.
PS: I’m not familiar with Volta, but it looks more like an NPM problem. | unknown | |
d13276 | train | It could be because you are sorting your array after you have printed out the elements.
Try arranging your code this way
Arrays.sort(packageNames);
for (Package pack:packs){
System.out.println(pack.getName());
}
Alternatively, you could dump your array into a TreeSet and iterate through it via:
TreeSet<Package> packages = new TreeSet<Package>(Arrays.asList(packageNames));
then
for (Package pack:packages){
System.out.println(pack.getName());
}
Note that this will remove duplicate packages from your array. | unknown | |
d13277 | train | tl;dr
Rename your script to hello.wlua so that wlua.exe is used.
Details
While it is likely possible, if verbose, to locate and close the offending console window that Windows provided your process, it would be better if that console never appeared in the first place. If it does appear, then it is likely to flash on screen, and cause some users to be confused.
Subsystems
Windows has, since its earliest days, had the concept of a "subsystem" which each individual executable identifies with. Normal GUI applications are linked with /SUBSYSTEM:WINDOWS and get the full GUI treatment including the responsibility to create and display their own window(s) if and when needed.
Applications that expect to be run from a command line (or batch file) are linked with /SUBSYSTEM:CONSOLE, and as a result have standard file handles that are guaranteed to be open and are likely to be connected to some console window (or a pipe, or redirected to a file, but they do exist). That guarantee is strong enough that when a console program is started outside of a console (as when double-clicked from Explorer, or named in the Start|Run box) then the system automatically creates a console window for it, and binds the standard file handles to the new console.
There are other subsystems, but those two are the only important ones for normal users and developers.
lua.exe and wlua.exe
So why does this matter?
The stock lua.exe will be linked for the console, because that makes it possible to use it interactively from a command prompt. However, it means that it will always be supplied with a console window even when you don't want one.
The Lua for Windows distribution (which from the pathname showing in your console's title bar it looks like you are using) includes a second copy named wlua.exe which only differs by being linked for the Windows subsystem. As a result, it only displays a window if the script explicitly creates one to display. Of course, it also means that it cannot be used interactively at the command prompt.
File types and associations
For convenience, you can associate the file type .wlua with wlua.exe, and name your GUI script with that file type. That will enable launching programs in the usual way without getting the extra consoles. Of course, when debugging them, you can always run them with lua.exe from a command prompt and take advantage of the existence of stdout and the utility of the print function.
On my PC (64-bit Win 7 Pro) I have the following associations, which look like they were created by the installation of Lua for Windows:
C:...>assoc .lua
.lua=Lua.Script
C:...>ftype lua.script
lua.script="C:\Program Files (x86)\Lua\5.1\lua.exe" "%1" %*
C:...>assoc .wlua
.wlua=wLua.Script
C:...>ftype Wlua.script
Wlua.script="C:\Program Files (x86)\Lua\5.1\wlua.exe" "%1" %*
Extra credit: PATHEXT
You could also add .lua to the PATHEXT environment variable to save typing the file type at the command prompt. I'm not configured that way presently, but have in the past done that. I found that the standard practice of naming both modules and scripts with the same file type made that less useful.
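For example, to append it for the current cmd session (matching the prompt style shown above):
C:...>set PATHEXT=%PATHEXT%;.LUA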
The PATHEXT environment variable lists the file types that will be searched for in the PATH when you name a program to run without specifying its file type. Documentation for this is rather hard to locate, as there does not appear to be a single MSDN page listing all the "official" environment variables and their usage. This chapter of a book about Windows NT has a nice description of the interaction of PATH and PATHEXT, and despite being subtly out of date in some respects, it is the clearest detailed explanation of how the command prompt operates that I've come across.
It clarifies that each folder in PATH is searched for each extension named in PATHEXT:
If the command name includes a file extension, the shell searches each directory for the exact file name specified by the command name. If the command name does not include a file extension, the shell adds the extensions listed in the PATHEXT environment variable, one by one, and searches the directory for that file name. Note that the shell tries all possible file extensions in a specific directory before moving on to search the next directory (if there is one).
It also documents how file types and associations interact with the command prompt. Despite its age, it is well worth the read.
A: Windows executables explicitly list the subsystem they run on. As the windows "lua.exe" is linked for the console subsystem, windows automagically creates a console window for it. Just relink "lua.exe" for gui subsystem, and you won't get to see the output any more unless you run it from a console window. BTW: Gui programs can programmatically create the console.
An alternative is closing the created console on start.
For that, you must first use SetStdHandle to redirect STDIN, STDOUT and STDERR (use a file open to device nul if you don't want it at all), and then call FreeConsole to finally dismiss your unloved console window. No sweat, you have "alien" set up already...
A: If you can use winapi module or have similar calls in Alien, you can find the handler of the console window and hide the window itself. The code would be similar to this:
require "winapi"
local pid = winapi.get_current_pid()
local wins = winapi.find_all_windows(function(w)
return w:get_process():get_pid() == pid
and w:get_class_name() == 'ConsoleWindowClass'
end)
for _,win in pairs(wins) do win:show_async(winapi.SW_HIDE) end
You'll need to check whether this leaves the MessageBox visible or not.
A: Programmatic solution (run the same script under wlua.exe if possible)
do
local i, j = 0, 0
repeat j = j + 1 until not arg[j]
repeat i = i - 1 until not arg[i-1]
local exe = arg[i]:lower()
-- check if the script is running under lua.exe
if exe:find('lua%.exe$') and not exe:find('wlua%.exe$') then
arg[i] = exe:gsub('lua%.exe$','w%0')
-- check if wlua.exe exists
if io.open(arg[i]) then
-- run the same script under wlua.exe
os.execute('"start "" "'..table.concat(arg,'" "',i,j-1)..'""')
-- exit right now to close console window
os.exit()
end
end
end
-- Your main program is here:
require "luarocks.require"
require "alien"
local MessageBox = alien.User32.MessageBoxA
MessageBox:types{ret = "long", abi = "stdcall", "long", "string", "string", "long" }
MessageBox(0, "Hello World!", "My Window Title", 0x00000040) | unknown | |
d13278 | train | Sometimes we have to write the IP address instead of "http://mywebsite.com"; so try once using the IP address.
Also check that the INTERNET permission tag is a child of the manifest tag, not the application tag!
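For reference, a minimal sketch of the expected placement in AndroidManifest.xml (the package name is just a placeholder):
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
    package="com.example.app">
    <!-- correct: a direct child of <manifest>, outside <application> -->
    <uses-permission android:name="android.permission.INTERNET" />
    <application>
        <!-- activities etc. -->
    </application>
</manifest>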
And check this once; hope this is helpful to you | unknown |
d13279 | train | This is a bit tricky because there will be repetition if statuses are the same, so the COALESCE has to be on the outside of the GROUP_CONCAT:
SET @sql = NULL;
SELECT
GROUP_CONCAT(DISTINCT
CONCAT(
'COALESCE(GROUP_CONCAT(DISTINCT case when branch = ''',
branch,
''' then status end),''OK'') AS ',
CONCAT('Branch',branch)
)
) INTO @sql
FROM Table1;
SET @sql = CONCAT('SELECT productName, ', @sql, '
FROM Table1
GROUP BY productName');
PREPARE stmt FROM @sql;
EXECUTE stmt;
Link | unknown | |
d13280 | train | As stated in my answer to your previous question you cannot have a model named File as there is already a class named File in CakePHP's core code. You need to use an alternative name for your model. Think of File as a reserved word.
The reason why your code is sometimes working comes down to whether your model or Cake's File utility is touched first. In the case where your code is failing File is being used as the utility class rather than your model so isn't compatible.
Think about it: if Cake tries to instantiate the class File (i.e. new File(/* some params */)), which File class will it use in your code? The fact that this is unclear should give you the clue that you shouldn't be using a model with the same name as the utility class. I suspect your error logs contain more errors/warnings about your code as a result of this.
Unfortunately you have no choice but to change the name of your model. However, you may be able to change some of the renamed model class' attributes so that it still behaves as the File model (this is untested as I've never needed to do this, but might be worth a try). For example, rename the class (and filename) to AppFile and then set the $name, $alias and $table properties so that the model behaves as File:-
<?php
class AppFile extends AppModel {
public $name = 'File';
public $alias = 'File';
public $table = 'files';
} | unknown | |
d13281 | train | Maybe what you are looking for is to add your Building objects into an array:
var buildings = [];
buildings.push(b1);
buildings.push(b2);
Then, you can loop as you need to:
for (var i=0, len=buildings.length; i < len; i++){
displayOnMap(buildings[i]);
} | unknown | |
d13282 | train | I would consider using something like md5 for unique filenames.
Nevertheless, you can push the filenames into some array, then return those filenames as the result of the POST request and put them back into some input field.
To retrieve the response, simply add these lines to your code below the open call:
xhr.onreadystatechange = function () {
// If the request completed and status is OK
if (xhr.readyState == 4 && xhr.status == 200) {
// keep in mind that fileNames here are JSON string
// as you should call json_encode($arrayOfFilenames)
// in your php script (upload.php)
var fileNames = xhr.responseText;
}
}
If you'd like, consider using a simple library for AJAX requests, like axios. It's a promise-based HTTP client for the browser, really simple to use, and it saves you some time and effort because you don't have to memorize all this stuff you and I have just written.
This is one approach, but I think you can use $_SESSION as well, and it's perfectly valid. My guess is you don't have a logged-in user at this point, so my idea is as follows:
*
*put filenames into the $_SESSION
*use db transactions - as @Marc B suggested - to connect files with the user
*if there were no errors, just remove the filenames from $_SESSION; if there were some, just redirect the user back to the form (possibly with some info about what went wrong). This way they don't have to re-upload the files, because you still have the filenames in $_SESSION (see the sketch below)
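A minimal PHP sketch of that session bookkeeping (names are illustrative, not from the original code):
session_start();
// e.g. an md5-based unique name, as suggested at the top of this answer
$uniqueFileName = md5(uniqid('', true));
// step 1: remember each generated filename while the upload is pending
$_SESSION['uploaded_files'][] = $uniqueFileName;
// step 3: after the db transaction committed successfully, forget them
unset($_SESSION['uploaded_files']); | unknown |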
d13283 | train | For SQL Server, you can use CASE expressions to conditionally determine the amount you need to add, then SUM them together, like this:
SELECT Process, SUM(Volume) AS TotalVolume,
SUM(CASE WHEN TAT = 'Pass' THEN Volume ELSE 0 END) AS Pass,
SUM(CASE WHEN TAT = 'Fail' THEN Volume ELSE 0 END) AS Fail
FROM (
-- Dummy data here for testing
SELECT 1 AS Process, 1 as Volume, 'Pass' AS TAT
UNION SELECT 1, 2, 'Fail'
UNION SELECT 2, 5, 'Fail'
UNION SELECT 2, 5, 'Pass'
UNION SELECT 3, 1, 'Pass'
UNION SELECT 4, 6, 'Fail'
UNION SELECT 4, 4, 'Pass'
) MyTable
GROUP BY Process
ORDER BY Process
For Microsoft Access, CASE isn't supported, so you can use SWITCH or IIF, like so:
SUM(IIF(TAT = 'Pass', Volume, 0)) AS Pass | unknown | |
d13284 | train | I imagine you are building to a folder /dist and the app.js being conflicted is the one inside of it.
You should ignore /dist altogether. This folder is generated by the build process, meaning everyone that runs the project will create and update it.
A: Here is the default vue-cli .gitignore:
.DS_Store
node_modules
/dist
# local env files
.env.local
.env.*.local
# Log files
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Editor directories and files
.idea
.vscode
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw*
Note that not everything here may be useful to put in your own .gitignore. But you should for sure have at least node_modules and /dist.
A: If you are building the Vue project from scratch, then I can say the following: when building/compiling your Vue project, best practices say that you should keep your entire production-ready project in a dist/ or build/ directory, which is where the main app.js file with the conflicts you are having would live. This directory is only reserved for deploying the app and is not saved into your code repository, hence why you should add the directory that holds such production files to the .gitignore file. | unknown |
d13285 | train | This sed example has two replace commands, one for the first line (header) and one for the last line (trailer, denoted by $ in the second substitution). The -i option of sed edits the file in place.
sed -i '1 s/^.*value=/yoursubstitution value=/; $ s/^.*value=/yoursubstitution value=/'
output:
yoursubstitution value=file1id
body: blah blah blah
yoursubstitution value=file1id
A: You can match from the beginning of the line (^) to value=
echo 'header: 123xxx xxx value=file1id' | sed 's/^.*value=/header: 321aaa aaa value=/'
header: 321aaa aaa value=file1id | unknown | |
d13286 | train | You have to do something like this.
public GenericStack(int size) {
stackData = new Object[size];
tos = -1;
}
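A matching push would look something like this (a sketch; the original answer only shows the constructor and pop):
public void push(T item) {
// a real implementation should also guard against overflow
stackData[++tos] = item;
}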
Then later
@SuppressWarnings("unchecked")
public T pop() {
// check stack state (e.g. throw if the stack is empty)
return (T) stackData[tos--];
} | unknown | |
d13287 | train | See my example
*
*First child component works as expected (your code)
*Second displays "NONE" because its data is initialized with the prop value, which is undefined at the time the (child's) data() is executed. Any change to the prop in the future (in mounted in my example) won't affect the child's data...
const child = Vue.component('child', {
data() {
return {
nameData: this.name
}
},
props: {
name: {
type: String,
default: "NONE"
}
},
template: `<div> {{ nameData }} </div>`
})
const vm = new Vue({
el: "#app",
components: {
child
},
data() {
return {
firstName: "Charles",
secondName: undefined
}
},
mounted() {
this.secondName = "Fred"
}
})
<script src="https://cdnjs.cloudflare.com/ajax/libs/vue/2.5.17/vue.js"></script>
<div id="app">
<child :name="firstName"></child>
<child :name="secondName"></child>
</div>
A: name="charles" - you passed down the string "charles";
:name="firstName" - you passed down a variable "firstName" which seems to be undefined in the parent component at the time of child rendering and the prop in the child component gets the default value you provided it with.
UPD: I played a little with Michal's example. You can use computed instead of data() {}, or the prop itself directly if you don't need any data transformation, because it seems that you assign the parent's firstName value asynchronously or just later.
const child = Vue.component('child', {
computed: {
nameData() {
return this.name;
}
},
props: {
name: {
type: String,
default: "NONE"
}
},
template: `<div> {{ nameData }} </div>`
})
const vm = new Vue({
el: "#app",
components: {
child
},
data() {
return {
firstName: "Charles",
secondName: undefined
}
},
mounted() {
this.secondName = "Fred"
}
})
<script src="https://cdnjs.cloudflare.com/ajax/libs/vue/2.5.10/vue.js"></script>
<div id="app">
<child :name="firstName"></child>
<child :name="secondName"></child>
</div> | unknown | |
d13288 | train | There is no single command with which you can list all the permissions. If you are interested in using some tool, then you can try a tool for quickly evaluating IAM permissions in AWS.
You can try this script as well. As listing permissions with a single command is not possible, you can check with a combination of multiple commands.
#!/bin/bash
username=ahsan
echo "**********************"
echo "user info"
aws iam get-user --user-name $username
echo "***********************"
echo ""
# if [ $1=="test" ]; then
# all_users=$(aws iam list-users --output text | cut -f 6)
# echo "users in account are $all_users"
# fi
echo "get user groups"
echo "***********************************************"
Groups=$(aws iam list-groups-for-user --user-name $username --output text | awk '{print $5}')
echo "user $username belongs to $Groups"
echo "***********************************************"
echo "listing policies in group"
for Group in $Groups
do
echo ""
echo "***********************************************"
echo "list attached policies with group $Group"
aws iam list-attached-group-policies --group-name $Group --output table
echo "***********************************************"
echo ""
done
echo "list attached policies"
aws iam list-attached-user-policies --user-name $username --output table
echo "-------- Inline Policies --------"
for Group in $Groups
do
aws iam list-group-policies --group-name $Group --output table
done
aws iam list-user-policies --user-name $username --output table
A: Kindly run below one liner bash script regarding to list all users with their policies, groups,attached polices.
aws iam list-users |grep -i username > list_users ; cat list_users |awk '{print $NF}' |tr '\"' ' ' |tr '\,' ' '|while read user; do echo "\n\n--------------Getting information for user $user-----------\n\n" ; aws iam list-user-policies --user-name $user --output yaml; aws iam list-groups-for-user --user-name $user --output yaml;aws iam list-attached-user-policies --user-name $user --output yaml ;done ;echo;echo | unknown | |
d13289 | train | Apparently the order in which you add the URIs counts. If you have it set up like below, USER_DETAILS won't be recognized anymore, because the wildcard pattern matches first. You have to switch the order and add USER_DETAILS first.
sURIMatcher.addURI(AUTHORITY, BASE_PATH_USERS + "/*", USER_ID);
sURIMatcher.addURI(AUTHORITY, BASE_PATH_USERS + "/details", USER_DETAILS); | unknown | |
d13290 | train | For rapidly searching the data store, I would suggest creating an index of the URLs (or any other string-based criteria) that is based on a suffix tree data structure. The search would be done in O(k), where k is the length of the URL (which is really fast). A good introduction to such kind of trees can be found here.
When it comes to logging, try not to store the URLs one by one. I/O operations are quite resource-intensive and are in most cases the bottleneck of such systems. Try to write the URLs into your data store in batches. For example, keep the submitted URLs in memory and store them only in chunks of 1000 at a time. Just remember to update the suffix tree in some background or scheduled task to keep the data synchronized.
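A minimal sketch of that buffering idea in Python (store_batch is a placeholder for your real data-store call):
def store_batch(urls):
    # placeholder: replace with your data store's bulk-insert API
    print("persisting %d urls" % len(urls))

class BatchedUrlLogger:
    def __init__(self, flush_size=1000):
        self.flush_size = flush_size
        self.buffer = []

    def log(self, url):
        self.buffer.append(url)
        # write only when a full chunk has accumulated
        if len(self.buffer) >= self.flush_size:
            self.flush()

    def flush(self):
        if self.buffer:
            store_batch(self.buffer)
            self.buffer = []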
A: I was faced with this exact issue in SQL Server and the solution for me was a table to store all my unique URLs/titles, with a unique key on two computed columns containing a checksum of URL and TITLE. It took up about a tenth of the space of an equivalent key on the string URL/title and was 10X faster than a direct index.
I'm using SQL Server, so the statements were
(checksum([URL],(0)))
and
(checksum([TITLE],(0)))
I found this for MySql.
Since most of the traffic came from many of the same websites, it allowed me to consolidate URLs/titles without having to search the whole table with each insert to enforce the unique constraint. My procedure just returned a URL/title PK if it already existed.
To tie URLs to your users, use a USER_URL table with FKs to the PKs of USER and URL.
Good luck.
A: I wish there was a datatype on mysql for URIs. But since oracle has it and mysql is now oracle, this might happen someday...
http://download.oracle.com/docs/cd/B19306_01/server.102/b14200/sql_elements001.htm#i160550 | unknown | |
d13291 | train | By setManager(prevState => ({...prevState , data : res.data})) you're simply overriding your earlier 'main' data property.
data is an array, and the new values are in an array ... simply concat them:
setManager(prevState => ({...prevState ,
data : prevState.data.concat( res.data )
}))
After that you should have
[
{name: undefined , project : []},
{name: "john doe"},
{name: "jane doe"}
]
... but probably you wanted to manage names and project separately:
const [manager, setManager] = useState({
data: {
name: undefined,
project: []
}
});
... or even
const [manager, setManager] = useState({
name: undefined,
project: []
});
This way you don't need to 'address' this data with the 'intermediary' .data
<Component names={manager.name} />
... not by {manager.data.name}.
Of course updating only name needs different code
setManager(prevState => ({
...prevState,
name : prevState.name.concat( res.data )
}))
*
*...prevState prevents deleting project property
*name should be initialized with [], not undefined
*conditional rendering can be done with `{!manager.name.length && ... }
If you have separate states 'slices' then no need to use one common state, use many useState declarations.
A: Probably what you need is:
const [manager, setManager] = useState({
data: [{ name: undefined, project: [] }]
});
setManager(prevState => ({
data: prevState.data.map((item, index) => ({ ...item, ...res.data[index] }))
}));
However, if you're just storing an array of "items", then your state should look more like:
const [manager, setManager] = useState([{ name: undefined, project: [] }]);
setManager(prevState =>
prevState.data.map((item, index) => ({ ...item, ...res.data[index] }))
);
Also, the way how you're gonna merge prev and incoming data depends on many things and maybe you need some more sophisticated way of storing state.
A: You inited your state with an Array, not an Object, that's the reason for the behavior.
Change this from
useState({data : [{name : undefined , project : []}] });
to
useState({data : {name : undefined , project : []} }); | unknown | |
d13292 | train | The return value of kmeans is an S3 object which contains not only the centers, but also the cluster assignment.
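For example (a quick sketch using the built-in iris data):
km <- kmeans(iris[, 1:4], centers = 3)
km$centers # the cluster centers
km$cluster # the cluster assignment for each observation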
See the R manual of kmeans for details. | unknown | |
d13293 | train | I just need to add "SceneKit.framework" to Link Binary With Libraries:
A: Same problem here. I had a pure Objective-C class, simply named "GameViewController.m". To turn it into an Obj-C++ class, change the ".m" to ".mm".
However, just by changing the extension, the linker spat back 13 undefined symbols....
"_OBJC_CLASS_$_SCNTransaction", referenced from: ...
"_OBJC_CLASS_$_SCNFloor", referenced from: ...
"_OBJC_CLASS_$_SCNMaterial", referenced from: ...
etc...
To get around this, just create a different (new) Obj-C++ class, with a .mm extension, and place your C++ code in that class.
Now you can instantiate that new class from the class containing SceneKit calls and call the methods in the new Obj-C++ class.
Inside the GameViewController.m class:
...
scene = [SCNScene sceneNamed:@"art.scnassets/grid.dae"];
kinematics = [[KinematicScene alloc] init];
[kinematics setupCharacters];
...
And inside the new Obj-C++ class (KinematicsScene.mm)
@implementation KinematicScene
{
AI::Location *location;
}
-(id) init
{
self = [super init];
if( self )
{
AI::KinematicSeek* seek = new AI::KinematicSeek[2];
location = new AI::Location[2];
}
return self;
}
-(void) setupCharacters
{
.....
}
... etc ....
@end
A: This worked for me:
*
*Disable module support [CLANG_ENABLE_MODULES, -fmodules]. This has user experience downsides. Read the online help for this setting before disabling it, and also be aware that if you disable it you will become responsible for hunting down the libraries and frameworks you need to link against, and this can be tedious if you are not an old hand.
*Build and (attempt to) link. Add the frameworks and libraries you need to resolve the link errors. I recommend creating a group in your project and directing it to your SDK, then adding the frameworks and libraries relative to that SDK. | unknown | |
d13294 | train | When you are using the following query:
val citiesRef = db.collection("cities").whereNotEqualTo("capital", true)
It means that you are trying to get all documents in the cities collection where the capital field holds the opposite value of the Boolean true, which is false. So you'll only get the documents where the capital field holds the value of false. The above query is the same as:
val citiesRef = db.collection("cities").whereEqualTo("capital", false)
Since null, is not the negation of true, the documents that have the capital field set to null, won't be returned. And it makes sense since null is not a boolean type, not a string type, nor any other supported data types. Null represents "none" or "undefined" and it's a distinct data type and cannot be considered "not equal" to any other data type.
That's the same as the docs states:
Note that null field values do not match != clauses | unknown | |
d13295 | train | I think the component that you will find most useful is TableLayoutPanel. Find it under “Containers” in the Toolbox. Set the TableLayoutPanel’s Dock = Fill.
You can use it to lay out the controls in columns and rows. Once a control is inside the TableLayoutPanel, you can use the ColumnSpan property on such a control to span it across multiple columns; I’d use this for the button row at the bottom, i.e. make a new panel for the button row and put the buttons inside that. For the icon, of course, use RowSpan instead.
Experiment with various values of Anchor, AutoSize and AutoSizeMode for some of the controls, especially the message label that you want to grow automatically. If you set the TableLayoutPanel and the Form to AutoSize = true, then the window will grow automatically with the text contents.
A: You could try handling the TextChanged event of the label and measure the size of the string using something like this:
Graphics g = Graphics.FromHwnd(this.Handle);
SizeF s = g.MeasureString(yourLabel.Text, yourLabel.Font, yourLabel.Width);
After this, knowing the sizes of the other controls you can modify the size of the window accordingly. I am assuming that you only want to resize the window vertically.
A: Try a TableLayoutPanel for the layout and set its Dock property to Fill to occupy the entire Form. Then plop your "one-line" and "message" labels into their respective cells and set their Dock properties to Fill to occupy the entire cell.
If you really want to resize the entire Form to fit any message at runtime, you may have to use Graphics.MeasureString to determine the area you need to contain the string and then resize the form to contain that area.
A: Create your own new Form and show it as a dialog box. You can put whatever/however you want on that form.
Here you have a tutorial that will show you how to do the hardest part.
A: You might try to ask for the position of the last character
TextBox box = new TextBox();
box.Text = "...";
var positionOfLastCharacter = box.GetPositionFromCharIndex(box.TextLength);
Then you can calculate the necessary height of the textbox and the form.
Edit: That will give you the top left corner of the last character, you should add 10px or so to make the last line fit. | unknown | |
d13296 | train | I think your cut and grep commands could get stuck. You should probably make sure that their parameters aren't empty by using the [ -n "$string" ] test to see if $string isn't empty. In your case, if it were empty, it wouldn't add any files to the command that would use it afterwards, meaning that the command would probably wait for input from the command line (ex: if $string is empty and you do grep regex $string, grep wouldn't receive input files from $string and would instead wait for input from the command line). Here's a "complex" version that tries to show where things could go wrong:
while [[ $startTime -le $endTime ]]
do
thisfile=$(find * -type f)
if [ -n "$thisfile" ]; then
thisfile=$(grep -l $startDate $thisfile)
if [ -n "$thisfile" ]; then
thisfile=$(grep -l $startTime $thisfile)
if [ -n "$thisfile" ]; then
thisfile=`cut -d$ -f2 $thisfile`
if [ -n "$thisfile" ]; then
forDestination=`cut -d ~ -f4 $thisfile`
echo $fordestination
fi
fi
fi
fi
startTime=$(( $startTime + 1 ))
done
And here's a simpler version:
while [[ $startTime -le $endTime ]]
do
thisfile=$(grep -Rl $startDate *)
[ -n "$thisfile" ] && thisfile=$(grep -l $startTime $thisfile)
[ -n "$thisfile" ] && thisfile=`cut -d$ -f2 $thisfile`
[ -n "$thisfile" ] && cut -d ~ -f4 $thisfile
startTime=$(( $startTime + 1 ))
done
The "-R" tells grep to search files recursively, and the && tells bash to only execute the command that follows it if the command before it succeeded, and the command before the && is the test command (used in ifs).
Hope this helps =) | unknown | |
d13297 | train | Use one file as the reference for another with --reference=SOURCE:
$ cd "$(mktemp --directory)"
$ touch -m -t 200112311259 ./first
$ touch -m -t 200201010000 ./second
$ ls -l | sed "s/${USER}/user/g"
total 0
-rw-r--r-- 1 user user 0 Dec 31 2001 first
-rw-r--r-- 1 user user 0 Jan 1 2002 second
$ touch -m --reference=./first ./second
$ ls -l | sed "s/${USER}/user/g"
total 0
-rw-r--r-- 1 user user 0 Dec 31 2001 first
-rw-r--r-- 1 user user 0 Dec 31 2001 second | unknown | |
d13298 | train | Commit will make the database commit. The changes to persistent objects will be written to the database. If you don't commit, you will lose the changes you made in the database.
A: A transaction MUST end, either by a commit or by a rollback.
Why ?
A transaction is consuming resources:
*
*some bytes in memory
*usually a JDBC connection (or any kind of connection to a transactional external resource)
So, if a tx never ends: it will use a JDBC connection forever, and there are good chances that you run out of database connections.
Conclusion: you don't need to commit every tx, but you do need to terminate them: either by a commit or a rollback (there is no other end state for a tx).
A: Well, it is not only with Hibernate transactions but with all database transactions. Commit/rollback provide the Atomicity of the ACID (Atomicity, Consistency, Isolation, Durability) properties, which actually specify what a TRANSACTION is. Atomicity is more like do or die.
Answer to your question:
//creates something like a cache/temporary space for you to perform all your operations. Note that these changes will not be reflected in your database at this point.
Session.beginTransaction();
//perform some db operations
//this line flushes/posts your changes from the temporary space to the database. If your changes contain an error, then the database will not be affected; otherwise the changes will be applied.
Session.commit();
Hope this is helpful!
A: A transaction must be closed. So by committing, the transaction will automatically be closed, as long as the current context property mentioned in hibernate.cfg.xml is thread and not managed.
This is to maintain the ACID properties of a transaction. Also, when a transaction begins, it is allocated a lot of memory and resources.
Best practices suggest that you should roll back the entire transaction and close the session if there's an exception in the catch block, and commit the transaction in the last part of the try block rather than in the finally block.
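A minimal sketch of that pattern (variable names are illustrative):
Session session = sessionFactory.openSession();
Transaction tx = null;
try {
    tx = session.beginTransaction();
    // ... perform persistence operations here ...
    tx.commit(); // commit in the last part of the try block
} catch (RuntimeException e) {
    if (tx != null) tx.rollback(); // roll back on any error
    throw e;
} finally {
    session.close(); // always release the session and its resources
} | unknown |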
d13299 | train | You can get the nodes as shown in the code below and change the value.
[xml]$xmlData = Get-Content $EnvConfigFileName
$xmlData.environment = "Data"
#Save the file
$xmlData.Save($EnvConfigFileName) | unknown | |
d13300 | train | You can set the user of the instance wrapped in the form to request.user:
from django.contrib.auth.decorators import login_required
@login_required
def newproject(request):
if request.method == "POST":
form = NewProjectForm(request.POST, request.FILES)
if form.is_valid():
form.instance.user = request.user
form.save()
return redirect('main:dashboard')
else:
messages.error(request, "Error")
else:
form = NewProjectForm()
return render(request,
"main/newproject.html",
{"form":form})
Note: You can limit access to a view to authenticated users with the
@login_required decorator [Django-doc]. | unknown |