source
sequence | text
stringlengths 99
98.5k
|
---|---|
[
"stackoverflow",
"0060887055.txt"
] | Q:
Flask SqlAlchemy AttributeError: 'str' object has no attribute '_sa_instance_state'
So, I am trying to add images name that I save in specified directory, but this error keeps coming and nothing is been added in the database, Although the images keep getting saved in the specified directory.
Here are all my files
Models.py
from shop import db
from datetime import datetime
class Product(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
price = db.Column(db.Numeric(10,2), nullable=False)
stock = db.Column(db.Integer, nullable=False)
desc = db.Column(db.Text, nullable=False)
pub_date = db.Column(db.DateTime, nullable=False,
default=datetime.utcnow)
brand_id = db.Column(db.Integer, db.ForeignKey('brand.id'),
nullable=False)
brand = db.relationship('Brand',
backref=db.backref('brands', lazy=True))
category_id = db.Column(db.Integer, db.ForeignKey('category.id'),
nullable=False)
category = db.relationship('Category',
backref=db.backref('categories', lazy=True))
image_1 = db.Column(db.String(256), nullable=False, default='image1.jpg')
image_2 = db.Column(db.String(256), nullable=False, default='image2.jpg')
image_3 = db.Column(db.String(256), nullable=False, default='image3.jpg')
def __repr__(self):
return '<Product %r>' % self.name
class Brand(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), nullable=False, unique=True)
class Category(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), nullable=False, unique=True)
db.create_all()
forms.py
from flask_wtf.file import FileAllowed, FileField, FileRequired
from wtforms import Form,StringField, IntegerField, BooleanField, TextAreaField, validators
class AddProducts(Form):
name = StringField("Name", [validators.DataRequired()])
price = IntegerField("Price:RS ", [validators.DataRequired()])
stock = IntegerField("Stock", [validators.DataRequired()])
desc = TextAreaField("Description", [validators.DataRequired()])
# colors = TextAreaField("Colors", [validators.DataRequired()])
image_1 = FileField('Image 1', [FileRequired(), FileAllowed(['jpg, jpeg, png, svg, gif']), "Images Only please"])
image_2 = FileField('Image 2', [FileRequired(), FileAllowed(['jpg, jpeg, png, svg, gif']), "Images Only please"])
image_3 = FileField('Image 3', [FileRequired(), FileAllowed(['jpg, jpeg, png, svg, gif']), "Images Only please"])
*routes.py
@app.route('/addproduct', methods=["GET", "POST"])
def addproduct():
brands = Brand.query.all()
categories = Category.query.all()
form = AddProducts(request.form)
if request.method == "POST":
name = form.name.data
price = form.price.data
stock = form.stock.data
desc = form.desc.data
brand = request.form.get('brand')
category = request.form.get('category')
image_1 = photos.save(request.files['image_1'] , name=secrets.token_hex(10) + '.')
image_2 = photos.save(request.files['image_2'] , name=secrets.token_hex(10) + '.')
image_3 = photos.save(request.files['image_3'] , name=secrets.token_hex(10) + '.')
print(f"Image 1 name:{image_1}, its type:{type(image_1)}")
product = Product(name=name, price=price, stock=stock, desc=desc, brand=brand, category=category,
image_1=image_1,image_2=image_2, image_3=image_3)
db.session.add(product)
flash(f"{name} has been added to database.", 'success')
db.session.commit()
return redirect(url_for('admin'))
return render_template('products/addproduct.html', title='Add Product', form=form, brands=brands,
categories=categories)
All the images type are strings, and model fields are string too, still I keep getting this error.
Here is my html page for this form
{% extends 'layout.html' %}
{% block body_block %}
{% include '_messages.html' %}
<div class="container">
<div class="row">
<div class="col-md-2"></div>
<div class="col-md-8">
<h2 class="text-center bg-info p-2">
Add product
</h2>
{% from '_formhelpers.html' import render_field %}
<form action="" method="POST" enctype="multipart/form-data">
{{ render_field(form.name, class="form-control", placeholder="Product Name")}}
{{ render_field(form.price, class="form-control", placeholder="Price") }}
{{ render_field(form.stock, class="form-control", placeholder="Stock") }}
<label for="brand">Add a Brand</label>
<select class="form-control" name="brand" id="brand">
<option value="" class="form-control" required> Select a Brand</option>
{% for brand in brands%}
<option value="brand.id" class="form-control">{{brand.name}}</option>
{% endfor %}
</select>
<label for="category">Add a Category</label>
<select class="form-control" name="category" id="category">
<option value="" class="form-control" required> Select a Category</option>
{% for category in categories %}
<option value="category.id" class="form-control">{{category.name}}</option>
{% endfor %}
</select>
{{ render_field(form.desc, class="form-control", placeholder="Product Description", rows=10) }}
<div class="container">
<div class="row">
<div class="col-md-4">
{{ render_field(form.image_1, class="form-control")}}
</div>
<div class="col-md-4">
{{ render_field(form.image_2, class="form-control")}}
</div>
<div class="col-md-4">
{{ render_field(form.image_3, class="form-control")}}
</div>
</div>
</div>
<button type="submit" class="btn btn-outline-info mt-4">Add Product</button>
</form>
</div>
<div class="col-md-2"></div>
</div>
</div>
{% endblock body_block %}
A:
I've simplified your example below. The error is occurring on the 5th line:
image_1 = photos.save()
image_2 = photos.save()
image_3 = photos.save()
print(f"Image 1 name:{image_1}, its type:{type(image_1)}")
product = Product(brand=brand, category=category) # <- HERE
db.session.add(product)
Since you mentioned that it's saving the images to the directory, the first 3 lines must be succeeding. And since nothing gets added to the database, the error must occur somewhere between lines 4 and 6.
Since you've specified in your models.py that attributes brand and category are relationship attributes (db.relationship()) These attributes are expecting an ORM object, and you're passing them a string.
Instantiate the ORM objects before passing them to Product like so:
brand = Brand(name="brandname")
category = Category(name="categoryname")
product = Product(brand=brand, category=category)
This is what's indicated in this answer, understandably it's a long read: https://stackoverflow.com/a/55877355/3407256
|
[
"stackoverflow",
"0058152177.txt"
] | Q:
Conditional filter multiple columns python
I have 5 columns, each of them has 0's and 1's in each row. I need to filter all those with '1' at once.
I tried this but results in error:
df_2 = df_1[df_1.columns[0:5] == 1]
ValueError: Item wrong length 2 instead of 111249
A:
I believe you need any if you want to keep rows with at least one 1 among the filtered columns:
df_2 = df_1[(df_1[df_1.columns[0:5]] == 1).any(axis=1)]
Or all if you want to keep only rows where every one of the filtered columns equals 1:
df_2 = df_1[(df_1[df_1.columns[0:5]] == 1).all(axis=1)]
|
[
"stackoverflow",
"0034186682.txt"
] | Q:
Rails, how to test 400 response with missing parameter
I'm working with Rails 4.2.5, and I'm trying to test an expected 400 response from a controller in case of malformed request. The parameter validation logic is handled by strong_parameters.
the context
In my controller I have:
def user_params
params.require(:user).permit(:foo, :bar, :baz)
end
And I'm referencing user_params in a POST request with Accept and ContentType headers set to application/json.
In development, POSTing without a user parameter will raise a ActionController::ParameterMissing exception.
If I set this in my environments/development.rb file:
Rails.application.configure do
config.consider_all_requests_local = false
end
Rails will act like in production and return a simple 400 response instead of the debug page. Great.
the problem
I'm having troubles doing the same thing in my controller tests, though (rspec-rails ~> 3.4).
Specifically, this will pass:
expect {
post :action, malformed_params
}.to raise_error(ActionController::ParameterMissing)
but this won't because of the raised exception:
post :action, malformed_params
expect(response.status).to eql 400
I have already tried to flip the consider_all_requests_local for the test environment, to no avail.
Any solution?
There is an old answer on this, but it does not help (and even the asker recognized it was not useful).
Edit:
As asked in the comments, here are the parameters from the controller spec:
let(:user_data) do
{ foo: "foo", bar: "bar", baz: "baz" }
end
let(:good_params) do
{
some_id: SecureRandom.uuid,
user: user_data
}
end
let(:malformed_params) do
{ some_id: SecureRandom.uuid }.merge(user_data)
end
let(:incomplete_params) do
{ some_id: SecureRandom.uuid }
end
good_params works for the happy path. Neither malformed_params nor incomplete_params work when I want to test the 400: both will raise the exception and cause the test to fail.
I've already verified that the same parameter payloads will work when POSTing to a running server (in development, but with the configuration tweaked as described above).
A:
If you want to simulate production across all environments and always raise 400 when any required parameter is missing, instead of changing each config file, you could just put in your controller (or ApplicationController or any other mixin to make it more general):
# assuming we're talking about a JSON API
rescue_from ActionController::ParameterMissing do |e|
render json: { error: e.message }, status: :bad_request
end
# or more general for any kind of response
rescue_from ActionController::ParameterMissing do |e|
head :bad_request
end
|
[
"math.stackexchange",
"0002272580.txt"
] | Q:
Why is eigendecomposition $V \Lambda V^{-1}$ not $V^{-1} \Lambda V$
Say you have a linear transformation matrix $A$. In the basis of eigenvectors, this transformation simply becomes a scaling, represented by the diagonal matrix of eigenvalues.
Thus, intuitively the transformation A can be decomposed into the following:
Transform into the basis of eigenvectors (using the transformation matrix $V$, where the eigenvectors form the columns)
Apply the scaling.
Transform back.
This would seem to correspond to $V^{-1} \Lambda V$, where the standard notation of matrices being applied on the left and vectors on the right holds.
Yet everywhere I always see the formula as $V \Lambda V^{-1}$. Why isn't my intuition correct?
A:
It all depends on what you define your coordinate transformation matrix $V$ to be; obviously if you replace it by the inverse matrix (which carries the same information) then the two possible formulae for the diagonalisation are interchanged. Now typically people take $V$ to be matrix whose columns contain the coordinates of a chosen basis of eigenvectors, the coordinates being expressed of course in terms of the basis for which the matrix $A$ was originally expressed. And it is a sad fact of life that multiplying by that matrix will perform the coordinate transformation in the opposite sense, in other words convert a vector expressed in coordinates on the basis of eigenvectors to its expression in the original basis. Think of it: if you apply $V$ to a standard basis vector, the result is a column of $V$, and therefore will express an eigenvector (one whose coordinates with respect to the eigenvector basis are given by that standard basis vector) in coordinates with respect to the original basis.
A:
The formula was generated from the equation $AV=V\Lambda$, which is a compact way of presenting the set of formulas $Av_i=\lambda_i{v_i}$ for the eigenvectors.
In this case matrix $\Lambda$ is scaling column vectors ${v_i}$ grouped in the matrix $V=[v_1 \ \ v_2 \ \dots \ \ v_n]$.
|
[
"math.stackexchange",
"0000358706.txt"
] | Q:
Fourier Series of Multivariable Functions.
If I have some function $V(x,y)$ which is periodic in x with period L. I wish to expand $V(x,y)$ in terms of a fourier sine (for simplicity) series in $x$, is it always the case that I may write the following?
$$V(x,y) = \sum_{n = 1}^{\infty} a_n \sin(\frac{n\pi x}{L})f_n(y)$$
Where the $f_n(y)$ are whatever functions of $y$ are needed to satisfy the equality -- do they need to have a particular form or any properties other than continuity?
It seems to me that we might require a certain type of function $V(x,y)$ to do this, does anyone know what the neccessary and sufficient conditions on $V(x,y)$ are to be able to expand in this way?
A:
Not really. Your expansion will, rather, take the form
$$V(x,y) = \sum_{n=1}^{\infty} \: \sum_{m=1}^{\infty} a_{nm} \sin{\left ( \frac{n \pi x}{L}\right)} f_m(y)$$
$f_m$ will be defined by boundary conditions on the $y$ boundaries.
|
[
"stackoverflow",
"0030684675.txt"
] | Q:
Add font to website
I am trying to add Ubuntu font to my website, I'd like to have it universal on the site so everything is in that font but I started by just adding it to my h1 tag to see if it works, but it doesn't work. I have uploaded the font to my server.
This is my css so far:
@font-face {
font-family: 'Ubuntu';
src: url('ubuntu/Ubuntu-R.ttf') format('truetype');
font-weight: normal;
font-style: normal;
}
h1, h2, h3, h4, h5, h6 {
font-family: 'Ubuntu', sans-serif;
font-weight : normal;
margin-top: 10px;
letter-spacing: -1px;
}
h1 {
font-family: 'Ubuntu', sans-serif;
color: #000;
margin-bottom : 0.2em;
font-size : 3em; /* 96 / 16 */
line-height : 1.4;
}
I'm probably being really stupid, as usual. I've looked at other examples but can't really see what I am doing wrong.
A:
The problem can be solved in 2 ways.
You can either add the google font cdn to the head tag of your website if already hosted or if you have active internet access, so you don't need to worry adding it locally in your folder.
<link href='http://fonts.googleapis.com/css?family=Ubuntu' rel='stylesheet' type='text/css'>
Like so
Or probably your file path location is wrong
if your fonts are placed in the root directory, you can easily call it like
src: url('Ubuntu-R.ttf') format('truetype');
But if it is in a folder (e.g ubuntu)
it can be src: url('ubuntu/Ubuntu-R.ttf') format('truetype');
But if in a very long path.. try doing something like this
src: url(../ubuntu/Ubuntu-R.ttf);
Then calling the font-family attribute, call just "one" name, if it is locally in a folder. like so
@font-face {
font-family: Ubuntu;
src: url(ubuntu/Ubuntu-R.ttf);
}
.myclass{
font-family:Ubuntu; // The exact name for the @font-face
}
Hope it helps
|
[
"stackoverflow",
"0045979428.txt"
] | Q:
How to set static variable and instantiate only one prefab for the AR Core demo
I'm trying to modify the demo scene in the Unity AR Core SDK and
I've created a static bool variable isCreated to check if the Andy prefab is created.
In the following check
if (Session.Raycast(m_firstPersonCamera.ScreenPointToRay(touch.position), raycastFilter, outHit))
I've set the variable to be true and then put another check here
if (Input.touchCount < 1 || (touch = Input.GetTouch (0)).phase != TouchPhase.Began || isCreated) {
return;
}
But for some reason, the variable never gets set to true.
I've also noticed this error in the logs, and can't help but wonder if it somehow prevents it from getting set.
08-29 14:11:40.564 13392-13407/? E/Unity: OPENGL NATIVE PLUG-IN ERROR: GL_INVALID_ENUM: enum argument out of range
(Filename: ./Runtime/GfxDevice/opengles/GfxDeviceGLES.cpp Line: 368)
Please help.
A:
I don't know how badly you want your boolean to be static, but I achieved the same result by doing something similar:
bool m_placed = false; // under the color array
Then just like you, I check for it here:
if (Session.Raycast(m_firstPersonCamera.ScreenPointToRay(touch.position), raycastFilter, out hit) && !m_placed) {
...
m_placed = true; // At the very end of this block
}
This works perfectly for me. I did not have to add anything to
if (Session.Raycast(m_firstPersonCamera.ScreenPointToRay(touch.position), raycastFilter, outHit))
The OpenGL error is a common issue that has already been raised. It should not affect this.
https://github.com/google-ar/arcore-unity-sdk/issues/3
|
[
"stackoverflow",
"0018185051.txt"
] | Q:
EHCache + hibernate: multiple queries to database
I configured my app to use query caching.
Hibernate config:
hibernate.cache.region.factory_class=net.sf.ehcache.hibernate.SingletonEhCacheRegionFactory
hibernate.cache.use_query_cache=true
EHCache config:
<ehcache xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="ehcache.xsd" updateCheck="false" monitoring="autodetect" dynamicConfig="false">
<defaultCache
maxEntriesLocalHeap="10000"
eternal="false"
timeToIdleSeconds="120"
timeToLiveSeconds="120"
maxEntriesLocalDisk="10000000"
diskExpiryThreadIntervalSeconds="120"
memoryStoreEvictionPolicy="LRU">
<persistence strategy="localTempSwap"/>
</defaultCache>
<cache name="query.Dictionary.CountriesList"
maxEntriesLocalHeap="10000"
maxEntriesLocalDisk="1000"
eternal="false"
timeToLiveSeconds="86400">
<persistence strategy="localTempSwap" />
</cache>
</ehcache>
DAO:
Criteria query = session.createCriteria(DictionaryCountry.class)
.setCacheable(true)
.setCacheRegion("query.Dictionary.CountriesList")
.addOrder(Order.asc("name"));
Now when I try to populate a list of countries for the first time - the standard query is made (select * from ... where ... ). But when I do it for the second time - instead of getting from the cache the app executes a lot of get by id sql queries (select * from ... where id = ? ) ...
Is it normal behaviour?
Thank you
A:
It's been a long time, but today I had the same problem; here is an answer in case someone needs help with EHCache + Spring + Hibernate.
The previous configuration is correct; I would only add the following:
1.- Add: <property name="hibernate.cache.use_second_level_cache">true</property>
2.- Set in your models
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE )
3.- Add something like this to make your model cacheable
<cache name="com.yourcompany.yourproyect.model.DictionaryCountry
maxEntriesLocalHeap="10000"
maxEntriesLocalDisk="1000"
eternal="false"
timeToLiveSeconds="86400">
<persistence strategy="localTempSwap" />
</cache>
query.setCacheable(true); Required makes your models cacheable
query.setCacheRegion("query.Dictionary.CountriesList"); optional as makes cacheable at query level
ehcache version:
<dependency>
<groupId>net.sf.ehcache</groupId>
<artifactId>ehcache-core</artifactId>
<version>2.4.5</version>
</dependency>
I think that's it as this is working for me.
|
[
"electronics.stackexchange",
"0000323456.txt"
] | Q:
Would a triangle wave have finite or infinite sinusoidal components?
A discontinuity causes a signal to have infinite sinusoidal components, but a triangle wave is continuous. I was taking a class in which the instructor said that since the triangle wave is continuous it can be represented by a finite number of sine components, and he also showed a finite sum of multiple frequencies of sinusoids which did give the shape of a pure triangle wave.
The only problem I have in mind is that the derivative of a triangle wave is not continuous — it is a square wave — and hence would need an infinite sum of sinusoids. So if one differentiates both sides of the formula for the Fourier series of a triangle wave, we would get a square wave expressed as a sum of a finite number of sinusoids. Would that not be incorrect?
A:
a triangle wave is continuous
Quote from here: -
The triangle wave has no discontinuous jumps, but the slope changes
discontinuously twice per cycle
Having the slope change discontinuously also means an infinite range of sinusoidal components.
For instance, if you time-integrated a square wave you would produce a triangle wave, but all the harmonics of the original square wave are still present after the time integration: -
A:
instructor said that since the triangle wave is continuous it can be represented by a finite number of sine
You either didn't get this right or the instructor misspoke. It's not sufficient for the signal itself to be continuous; all derivatives must be continuous too. If there is any discontinuity in any derivative, then the repeating signal will have an infinite series of harmonics.
A triangle is continuous, but its first derivative is a square wave, which is not continuous. A triangle wave therefore has an infinite series of harmonics.
A:
Math proof:
Take a function made up of the weighted sum of a finite series of sine/cosine components.
Its derivative is also a weighted sum of a finite series of sine/cosine components. Same if you derivate any number of times.
Since sine and cosine are continuous, the function and all its derivatives are continuous.
Thus, a function having a discontinuity in any of its derivatives can't be built with a finite series of sine/cosine components.
|
[
"math.stackexchange",
"0000427868.txt"
] | Q:
Book recommendations for relearning high school math to study Calculus and beyond?
Assume someone has very limited knowledge of math: low level high school, 5-6 years ago. How would they learn from the basics of algebra, geometry and trigonometry to a solid foundation for calculus and beyond? I would like to relearn math and go to university for computer science.
I am looking for book recommendations, math learning strategies (how to comprehend math texts), and a brief explanation as to how one knows when they are ready to learn calculus.
Some books' prose is quite overwhelming. How can I better understand?
How essential is geometry for calculus? What about for further math such as linear algebra, discrete math or differential equations?
How can I make sure that what I am reading/learning will stick? How can I maximize comprehension of a textbook and rule /definitions? Is notetaking out of a textbook effective?
Is it essential to master high-school math before attempting calculus or can holes be patched in the process of learning calculus?
I am not simply looking for youtube videos, though they are useful I want more substance than simply being spoon-fed.
A:
I would say that you want to focus on what brings you success in your goals for getting a computer science degree.
If you look at a CS program, there are some commonalities, but also differences in how much math they want you to learn. Do you have a specific program in mind? Did you review their prerequisites and the courses you will be required to take? Are you thinking of going to graduate school too as there may be different considerations?
The typical math classes required for a CS major are (check the university/college you are thinking about and make sure you understand their requirements) as follows.
Discrete Math: Topics include combinatorics, number theory, and graph theory with an emphasis on creative problem solving and learning to read and write rigorous proofs.
Computability and Logic: An introduction to some of the mathematical foundations of computer science, particularly logic, automata, and computability theory.
Algorithms: Algorithm design, computer implementation, and analysis of efficiency. Discrete structures, sorting and searching, time and space complexity, and topics selected from algorithms for arithmetic circuits, sorting networks, parallel algorithms, computational geometry, parsing, and pattern-matching.
Mathematical Analysis I: Analysis of the real numbers, and an introduction to writing and communicating mathematics well. Topics include properties of the rational and the real number fields, the least upper bound property, induction, countable sets, metric spaces, limit points, compactness, connectedness, careful treatment of sequences and series, functions, differentiation and the mean value theorem, and an introduction to sequences of functions.
Numerical Analysis: An introduction to the analysis and computer implementation of basic numerical techniques. Solution of linear equations, eigenvalue problems, local and global methods for non-linear equations, interpolation, approximate integration (quadrature), and numerical solutions to ordinary differential equations.
Scientific Computing: Computational techniques applied to problems in the sciences and engineering. Modeling of physical problems, computer implementation, analysis of results; use of mathematical software; numerical methods chosen from: solutions of linear and nonlinear algebraic equations, solutions of ordinary and partial differential equations, finite elements, linear programming, optimization algorithms and fast-Fourier transforms.
Abstract Algebra I: Groups, rings, fields and additional topics. Topics in group theory include groups, subgroups, quotient groups, Lagrange's theorem, symmetry groups, and the isomorphism theorems. Topics in Ring theory include Euclidean domains, PIDs, UFDs, fields, polynomial rings, ideal theory, and the isomorphism theorems. In recent years, additional topics have included the Sylow theorems, group actions, modules, representations, and introductory category theory.
Some CS majors are doing dual CS/Math and additional math courses like Probability, Analysis, Complex Variables, Differential Equations, Partial Differential Equations may also be required.
As for books, I would recommend perusing some of the wonderful MSE responses, for example:
Math from Ground Up
Discrete Math and A good introductory discrete mathematics book.
Abstract Algebra
Algebra
Combinatorics
Logic
Complex Analysis
Calculus and What are the recommended textbooks for introductory calculus?
Practice-Practice-Practice and then practice some more the skill of problem solving, see my response at Expanding problem solving skill
You might also want to get some Schaum's Outline series for math to practice problems. They also have some CS books
You might also consider some of the Dover math book series since the cost is great for review
You mentioned this, but I think it is important. You may want to go through entire courses using opencourseware. For example, MIT. See the OCW Consortium for many more institutions. The goal of this is to gauge where you are with following lectures and testing your understanding.
So, if you look at the specific program you are interested in, I would recommend looking at the math courses in totality (including if you want graduate), checking their required books and looking to see where you stand with all of it. Recall, these programs are also heavy into programming and that is a lot of work, so make sure you are ready for both!
A:
Currently relearning HS Math myself. My current reading list is:
Serge Lang - Basic Mathematics
I.M. Gelfand - Algebra
I.M. Gelfand - Trigonometry
George F. Simmons - Precalculus in a Nutshell
|
[
"stackoverflow",
"0008522424.txt"
] | Q:
How should one avoid input clashes with several USB barcode scanners?
I have two Opticon USB barcode scanners, which are attached to a single PC. If two barcodes are scanned simultaneously, input may be mingled.
For example:
Barcode A: 0123456
Barcode B: 0000000
Scanned simultaneously the output may be 01234000000056
How do I go about preventing this? I would like to avoid having to resort to using two separate machines if at all possible.
In the final system, the barcodes will be inserted into a database through web forms, if this helps at all. I imagine some sort of 'buffer' for every input would be ideal, but I have very little experience of barcode scanners.
Thank you in advance - apologies if my question is not clear enough.
A:
Well, your environment (Windows/Xorg/whichever) usually either (a) reads from a multiplex device (/dev/input/mice is such a multiplexing device, for example), or even (b) reads single devices but throws them into a single pot anyway, such that applications essentially read from a multiplex source again.
In other words, avoid multiplex sources, at least those that do not retain some unique ID to describe where the event actually originated from. How that is done depends on your environment.
|
[
"stackoverflow",
"0010573211.txt"
] | Q:
Java Sonatype Async HTTP Client Upload Progress
I am trying to implement async file upload with progress with sonatype async http client - https://github.com/sonatype/async-http-client.
I tried the method suggested in the docs. Using transfer listener.
http://sonatype.github.com/async-http-client/transfer-listener.html
I implemented onBytesSent of TransferListener interface (just as test):
public void onBytesSent(ByteBuffer byteBuffer) {
System.out.println("Total bytes sent - ");
System.out.println(byteBuffer.capacity());
}
Then in another thread(because I don't want to block the app) I tried to do the following:
TransferCompletionHandler tl = new TransferCompletionHandler();
tl.addTransferListener(listener);
asyncHttpClient.preparePut(getFullUrl(fileWithPath))
.setBody(new BodyGenerator() {
public Body createBody() throws IOException {
return new FileBodyWithOffset(file, offset);
}
})
.addHeader(CONTENT_RANGE, new ContentRange(offset, localSize).toString())
.execute(handler).get();
Everything is fine. File is uploaded correctly and very fast. But the issue is - I am getting messages from onBytesSent in TransferListener only AFTER the upload is finished. For exmaple the upload is completed in 10 minutes. And during that 10 minutes I get nothing. And only after that everything is printed on the console.
I can't figure out what is wrong with this code. I just tried to follow the docs.
I tried to execute the above code in the main thread and it didn't work either.
Maybe this is the wrong way to implement an upload progress listener using this client?
A:
I will answer it myself. I did not manage to resolve the issue with TransferListener. So I tried the other way.
I put the progress logic inside the Body interface implementation (inside the read method):
public class FileBodyWithOffset implements Body {
private final ReadableByteChannel channel;
private long actualOffset;
private final long contentLength;
public FileBodyWithOffset(final File file, final long offset) throws IOException {
final InputStream stream = new FileInputStream(file);
this.actualOffset = stream.skip(offset);
this.contentLength = file.length() - offset;
this.channel = Channels.newChannel(stream);
}
public long getContentLength() {
return this.contentLength;
}
public long read(ByteBuffer byteBuffer) throws IOException {
System.out.println(new Date());
actualOffset += byteBuffer.capacity();
return channel.read(byteBuffer);
}
public void close() throws IOException {
channel.close();
}
public long getActualOffset() {
return actualOffset;
}
}
Maybe it is a dirty trick, but at least it works.
|
[
"stackoverflow",
"0027034398.txt"
] | Q:
jQueryUI.Sortable update without using Ajax as normal form submit to PHP
I would like to sort a table with data row from MySQL then change the order and submit it.
I used jQueryUI.sortable to make those tr tag (row) draggable.
But when I submitting the form, some of them didn't changed order.
Why? I tried to figure it out, I var_dump the data I submitted and I found a problem:
The tr tag (row) I moved from the original order, won't pass to PHP so var_dump will not show the row ID.
To make it easier to understand, I post my code here:
HTML Code
<table>
<thead>
<tr>
<th>Subject</th>
<th>Content</th>
</tr>
</thead>
<tbody id="sortable">
<tr>
<td>
Hello World
<input name="displayorder[]" type="hidden" value="1" />
</td>
<td>I come from Mars!</td>
</tr>
<tr>
<td>
Hello Mars
<input name="displayorder[]" type="hidden" value="2" />
</td>
<td>I come from Jupiter!</td>
</tr>
<tr>
<td>
Hello StackOverflow
<input name="displayorder[]" type="hidden" value="3" />
</td>
<td>I come from North Korea ...</td>
</tr>
</tbody>
<tbody>
<tr>
<td colspan="2"><input type="submit" value="Submit!" />
</tr>
</tbody>
</table>
I omitted the form content cause it is not important
JavaScript (Sortable Library loaded)
$(document).ready(function() {
$('#sortable').sortable({
helper: fixHelper,
axis: 'y',
opacity: 0.6,
}).disableSelection();
});
var fixHelper = function(e, ui) {
ui.children().each(function() {
$(this).width($(this).width());
});
return ui;
};
PHP
$displayorder = $_POST["displayorder"];
if($displayorder != "") {
$order = 1;
foreach($displayorder as $value) {
mysql_query("UPDATE message SET displayorder=$order WHERE announcementid=$value");
$order++;
}
}
I will prefer not using Ajax to do this because I have dozens of similar page to do the same task.
Thanks in advance.
A:
Well I decided to code it every page.
The code now:
JavaScript
$(document).ready(function() {
$('#sortable').sortable({
helper: fixHelper,
axis: 'y',
opacity: 0.4,
update: function(event, ui){
var data = $(this).sortable('serialize');
$.ajax({
data: data,
type: 'POST',
url: '/update.php?action=displayorder'
});
},
}).disableSelection();
});
var fixHelper = function(e, ui) {
ui.children().each(function() {
$(this).width($(this).width());
});
return ui;
};
PHP
foreach($_POST["displayorder"] as $i => $value) {
mysql_query("UPDATE message SET displayorder=$i WHERE announcementid=$value");
$i++;
}
|
[
"stackoverflow",
"0042873691.txt"
] | Q:
git rebase --onto fails
I created a local repository for learning git so losing data is not a problem.
Current tree:
* 006f7ab - (2 days ago) Merge branch 'hotfix' idc what will heppen :( - stav alfi (master
|\
| * 0f028e8 - (2 days ago) good - stav alfi
* | fc040d3 - (2 days ago) good - stav alfi
* | ed29b30 - (2 days ago) good - stav alfi
|/
* a7c5bb3 - (2 days ago) good branch - stav alfi
* 9d804c2 - (2 days ago) new.txt changed in 16:35 - stav alfi
* 6ada3b7 - (2 days ago) new.txt changed in 16:32 - stav alfi (oldDad)
* f6497fc - (2 days ago) this is the nest commit! - stav alfi (oldDad1)
* b1b3e25 - (2 days ago) omg - stav alfi
* 74656b3 - (2 days ago) new1234 - stav alfi
* e8977d3 - (2 days ago) fast commit - stav alfi
* 114b46c - (3 days ago) good - Stav Alfi
* 8212c78 - (3 days ago) good - Stav Alfi
* 23dfc61 - (3 days ago) removed-something - Stav Alfi
* 184178d - (3 days ago) shortcut - Stav Alfi
* f1e606f - (3 days ago) good-commit - Stav Alfi
* 5ae787b - (3 days ago) initial-project-version1 - stav alfi
* 1321cba - (3 days ago) initial-project-version1 - stav alfi
* eae3e1c - (3 days ago) initial-project-version - stav alfi
* d3c3e93 - (3 days ago) initial-project-version - Stav Alfi
* db309e9 - (3 days ago) initial-project-version - Stav Alfi (HEAD -> newDad)
Desired tree: (What I'm tring to do)
* 006f7ab - (2 days ago) Merge branch 'hotfix' idc what will heppen :( - stav alfi (HEAD -> master)
|\
| * 0f028e8 - (2 days ago) good - stav alfi
* | fc040d3 - (2 days ago) good - stav alfi
* | ed29b30 - (2 days ago) good - stav alfi
|/
* a7c5bb3 - (2 days ago) good branch - stav alfi
* 9d804c2 - (2 days ago) new.txt changed in 16:35 - stav alfi
* 6ada3b7 - (2 days ago) new.txt changed in 16:32 - stav alfi (oldDad)
(....I want to remove all of this....)
* db309e9 - (3 days ago) initial-project-version - Stav Alfi (newDad)
The command I used + error:
$ git rebase --onto newDad oldDad1
First, rewinding head to replay your work on top of it...
Applying: new.txt changed in 16:32
error: Failed to merge in the changes.
Using index info to reconstruct a base tree...
A new.txt
Falling back to patching base and 3-way merge...
CONFLICT (modify/delete): new.txt deleted in HEAD and modified in new.txt changed in 16:32. Version new.txt changed in 16:32 of new.txt left in tree.
Patch failed at 0001 new.txt changed in 16:32
The copy of the patch that failed is found in: .git/rebase-apply/patch
When you have resolved this problem, run "git rebase --continue".
If you prefer to skip this patch, run "git rebase --skip" instead.
To check out the original branch and stop rebasing, run "git rebase --abort".
git status (after running git rebase --onto newDad oldDad1)
$ git status
rebase in progress; onto db309e9
You are currently rebasing branch 'master' on 'db309e9'.
(fix conflicts and then run "git rebase --continue")
(use "git rebase --skip" to skip this patch)
(use "git rebase --abort" to check out the original branch)
Unmerged paths:
(use "git reset HEAD <file>..." to unstage)
(use "git add/rm <file>..." as appropriate to mark resolution)
deleted by us: new.txt
no changes added to commit (use "git add" and/or "git commit -a")
My questions:
I'm failing to understand the error I got and what are to following words:
rewind,replay,applying.
I also don't know how to fix a conflict.
I'm interested to know what I did wrong and how to fix it.
Thanks a lot!
A:
OK, deep breath :-)
Git's rebase copies commits
The fundamental trick that git rebase uses is the cherry-pick operation, which copies a commit. We'll get to the mechanics of copying a commit at the moment, but consider a simple, ordinary git cherry-pick, where we git checkout some branch name—I will create a new one pointing to one particular commit here—and then tell Git to copy some other commit, typically one that is not (yet) on our new branch.
git checkout -b newbranch 23dfc61
This makes 23dfc61 the current commit but gives it a new branch name, newbranch. Now we can make new commits, which add to the new branch, so now we run, e.g.:
git cherry-pick 9d804c2
to copy commit 9d8042c.
The result, if this works—if there is no merge conflict, or after you clean up any merge conflict if there is one—is a new commit whose parent is 23dfc61, and whose source tree is like 23dfc61 but with whatever you changed in 9d804c2 as compared to 6ada3b7, added to it:
...
* 9d804c2 - (2 days ago) new.txt changed in 16:35 - stav alfi
* 6ada3b7 - (2 days ago) new.txt changed in 16:32 - stav alfi (oldDad)
* f6497fc - (2 days ago) this is the nest commit! - stav alfi (oldDad1)
* b1b3e25 - (2 days ago) omg - stav alfi
* 74656b3 - (2 days ago) new1234 - stav alfi
* e8977d3 - (2 days ago) fast commit - stav alfi
* 114b46c - (3 days ago) good - Stav Alfi
* 8212c78 - (3 days ago) good - Stav Alfi
| * NNNNNNN - (now) new.txt changed in 16:35 - stav alfi (HEAD -> newbranch)
|/
* 23dfc61 - (3 days ago) removed-something - Stav Alfi
* 184178d - (3 days ago) shortcut - Stav Alfi
...
We don't know what the new hash number will be, so I put in NNNNNNN. But the new commit has the same log message as the old one, and makes the same change as the old one.
Commits contain snapshots, not changes
Each commit has, attached to it, the complete source as of the time of that commit. This is different from many other version control systems: most tend to store each commit as a change from the commit before them, or the commit after them. What this means here is that in order to copy a commit, Git first has to find out what changed.
The way to find out is to compare the commit to its parent commit. The parent commit of 9d804c2 is 6ada3b7, so Git does:
git diff 6ada3b7 9d804c2
to see what changed. Assuming the log message is accurate, you changed something in new.txt, so that's what Git will find. That, then, is also what Git will try to do when it tries to modify the snapshot saved for 23dfc61 to come up with a new snapshot for NNNNNNN.
If that succeeds, Git will commit the result, and will have made a successful cherry-pick.
No commit can ever be changed
The unpronounceable hash IDs 23dfc61 and 6ada3b7 and badf00d and bedface and so on are constructed by taking the exact contents of each commit. If you try to change anything about any commit, Git builds a new commit; if there's even a single bit different anywhere, you get a new, different hash, so you get a new, different commit.
The parts that go into this include all the source, plus the parent ID, as each commit "points to" (contains the ID of) its parent. (There are also some time stamps, so unless you make the same commit twice in the same second, you still get two different IDs, even if they have the rest of their bits identical.) Hence, to change anything—whether it's the source, or just a parent ID—Git must copy commits.
This is why rebase copies commits: it must. You are taking some set of commits, turning each one into a change, and then applying those changes starting at some different commit, which has a different parent ID, even if it has the same source tree. So what you give to git rebase is, essentially, two chunks of information:
Which commits should it copy?
Where should it place those copies?
The place to copy is easy if you use --onto, as that's the place! The set of commits to copy, however, is trickier.
Selecting commits
Git provide a range notation, X..Y, that looks like it means "commits between X and Y"—and it does, sort of. But not quite! In fact, Git uses something we call reachability, following parent links in commits. We already noted that each commit has a parent ID stored in it. That's how Git can find your commits: you tell it to start at a branch tip, using a branch name like master, and it finds that particular commit by its hash ID, which Git remembers for you inside the name master.
That commit has another hash ID in it: this is the commit's parent. Git uses that hash ID to find that commit. The parent has yet another hash ID, and Git keeps finding more and more parents. This goes on as long as it possibly can, all the way back to the very first commit you ever made.
That's too many commits, so we tell Git to stop going back at some point. That's the "Y" part of X..Y: this tells Git start at Y and work backwards, marking commits "green" temporarily to take them. But, at the same time, start at X and work backwards, marking commits "red" temporary to avoid taking them.
I like to draw all of this with one-letter names for commits, instead of the big ugly hash IDs, and connecting lines that have older commits at the left and newer commits at the right:
...--D--E--F--G--H <-- branch
Here commit H is the tip of the branch, G is H's parent, F is G's parent, and so on. If we write E..H, that paints E (and D and on back) "red": stop, don't take these! Then it paints H green, and then G and F, and then we hit the red-painted E and stop. So that selects commits F-G-H. E is naturally excluded here.
But when we have branches and merges, things get trickier:
F--G--H
/ \
...--D--E K--L
\ /
I-----J
Commit K is a merge commit. A merge commit is one that has two (or more, but let's not go there) parents. If we stick with the red and green paint analogy, E..L means "paint E and on back red and paint L on back green": when we hit K, we paint both H and J green, and work back on both sides of this branch/merge.
If we say G..L, look how that works: we paint G red, then F, then E and D and so on. We never paint I at all, because that's not backwards from F: we can only move back, not forward, during this process. So then we paint L green, and K, and then both H and J. G is already red, so we stop that side, but keep going on the other, painting I green. Then we move back to E, but it's red so we stop. So this selects I and J, and also H, and K and L (in some order).
What git rebase copies: merges are a problem
When Git goes to select commits to copy, it uses your other (non-`--onto`) argument as the "red paint" part of the stop item, and your current commit as the "green paint" part. If you don't use --onto, the onto target is the same as the red-paint selector. That's all --onto does: it lets you choose a different "stop" red-paint selector than the target.
But if there is a merge in here—and in your case, there is—we have a problem, or really, two problems. One is that rebase cannot copy a merge, so it just does not even try. It just removes merges entirely, from the set of commits to copy. The other is that we follow both legs of a branch-and-merge, but we do not get to control the order unless we use an interactive (-i) rebase.
You were on master and ran:
git rebase --onto newDad oldDad1
so this selects:
oldDad1..master
as the commits to copy, but throws out all the merges, and linearizes the remainder of the commits. That means you start with:
* 006f7ab - (2 days ago) Merge branch 'hotfix' idc what will heppen :( - stav alfi (master
|\
| * 0f028e8 - (2 days ago) good - stav alfi
* | fc040d3 - (2 days ago) good - stav alfi
* | ed29b30 - (2 days ago) good - stav alfi
|/
* a7c5bb3 - (2 days ago) good branch - stav alfi
* 9d804c2 - (2 days ago) new.txt changed in 16:35 - stav alfi
* 6ada3b7 - (2 days ago) new.txt changed in 16:32 - stav alfi (oldDad)
but end up with:
* 0f028e8 - (2 days ago) good - stav alfi
* fc040d3 - (2 days ago) good - stav alfi
* ed29b30 - (2 days ago) good - stav alfi
* a7c5bb3 - (2 days ago) good branch - stav alfi
* 9d804c2 - (2 days ago) new.txt changed in 16:35 - stav alfi
* 6ada3b7 - (2 days ago) new.txt changed in 16:32 - stav alfi (oldDad)
or—since we don't control the order:
* fc040d3 - (2 days ago) good - stav alfi
* ed29b30 - (2 days ago) good - stav alfi
* 0f028e8 - (2 days ago) good - stav alfi
* a7c5bb3 - (2 days ago) good branch - stav alfi
* 9d804c2 - (2 days ago) new.txt changed in 16:35 - stav alfi
* 6ada3b7 - (2 days ago) new.txt changed in 16:32 - stav alfi (oldDad)
(all I did here was swap the two legs around). Git will check out commit db309e9 (newDad, your --onto) as a temporary branch, and then start cherry-picking each commit, turning 6ada3b7 into a change by comparing it against f6497fc. But this immediately fails:
error: Failed to merge in the changes.
Using index info to reconstruct a base tree...
A new.txt
Falling back to patching base and 3-way merge...
CONFLICT (modify/delete): new.txt deleted in HEAD and modified
in new.txt changed in 16:32. Version new.txt changed in 16:32
of new.txt left in tree.
The problem here is that new.txt does not exist in commit db309e9. Git does not know how to combine "make a slight change to new.txt" with "don't have a new.txt at all".
It's now your job to fix this conflict, by deciding how to have new.txt appear in the final snapshot. Edit or remove the file in the work-tree and when you are done, git add the result and run git rebase --continue and Git will go on to attempt to cherry-pick the next commit.
This repeats until git rebase has copied all the to-be-copied commits. Once that finishes, git rebase tells Git to "peel off" the original branch label (master) and paste it onto the last commit it just made. So now the master branch will name the newest commit, which will point back to its parent, and so on. The original commits—the ones you copied—are still in the repository, for a while, but they are now "abandoned" from this branch: they do not have the name master available to find them.
But existing branch names can still find the existing commits
The names oldDad and oldDad1 still point to some of the original (not-copied) commits here. Those names will still find those original commits. If there were more names that remembered some of the copied commits, those names would still remember the originals too. So the copied commits are not only not gone, sometimes they are still visible, depending on branch names.
Note that your final merge is just gone
Because git rebase does not even try to copy the merge, your merge commit will simply be omitted entirely. However, since both "legs" of the merge get applied (in some order), the final source tree will match, provided you resolve any conflicts appropriately. How hard or easy that will be depends on which leg gets done first and whether the two legs affect each other.
There is a --preserve-merges flag
There is a way to get git rebase to attempt to preserve merges. But it cannot actually preserve them. Instead, what it does is to copy each leg of a fork as before, but this time, by forking the two legs; and then when it reaches the merge commit, it runs a new git merge to make a new merge that—Git hopes—is "just as good" as the original.
In this particular case, --preserve-merges won't help with the immediate problem, because that happens before the branch-and-re-merge sequence. This new.txt file that is modified in the first commit you are cherry-picking, but does not exist in your starting-point, happens well before the branch-and-merge sequence. Whether --preserve-merges is any use to you, I do not know.
|
[
"stackoverflow",
"0041534593.txt"
] | Q:
Why would I ever use tf.concat instead of tf.stack?
Is there a good reason to use tf.concat instead of tf.stack? They seem very similar. Is it just to guarantee that the resulting tensor will have the same number of dimensions as the input list of tensors?
A:
Actually, I've misunderstood how tf.stack works. If the axis parameter is within the range of the existing dimensions, a new axis will be inserted at that index.
Example:
import tensorflow as tf
t1 = tf.random_normal([1, 3])
t2 = tf.random_normal([1, 3])
tf.stack([t1, t2], axis=1).shape.as_list() == [1, 2, 3]
tf.concat([t1, t2], axis=1).shape.as_list() == [1, 6]
|
[
"rpg.stackexchange",
"0000034309.txt"
] | Q:
Are class and feat based proficiencies cumulative?
A dnd 4e dwarf fighter has the feat Dwarven Weapon Training, which reads:
'+ damage and proficiency with axes and hammers.'
Our dwarf is already proficient with both weapons, being a fighter. Does that mean his attack roll with these weapons remain unchanged, or does increase by 2 with the feat?
A:
Proficiency is a boolean state - either you have it or you don't (for any given weapon).
So Dwarven Weapon Training will give a fighter the damage bonus, and proficiency with any axes and hammers they don't already have (exotic weapons!), but there's no additional bonus for already having proficiency.
A:
Attack remains unchanged, damage increases by 2...
...probably.
The dwarven weapon training feat specifies that it grants you proficiency in hammers and axes, including superior ones (there are none in the PHB1 or PHB2, but 6 in the Adventurer's Vault and maybe others elsewhere). Since the fighter is already proficient with military weapons, you do not gain the proficiency bonus again when wielding them. If you find a superior axe or hammer and begin wielding that, you would gain the proficiency bonus on attacks with it which you did not have before. All of the superior axes and hammers in the Adventurer's Vault have a +2 proficiency bonus, so their attack would remain the same as if you were wielding a military axe or hammer out of the PHB. There may be an axe or hammer with a different proficiency bonus, which would give you a different attack, but I'm not aware of them.
The feat also states that you gain a +2 feat bonus to damage. If you have no other feat bonuses to damage (and I can't tell by the question), this will increase your damage by +2. If you have other feat bonuses to damage, this will replace them if it is larger than they are, and do nothing if the other feat bonuses are already +2 or larger, per the bonus stacking rules (PHB 1, pg 192).
|
[
"space.stackexchange",
"0000013269.txt"
] | Q:
Ways to mitigate the forever violent re-entry? (Moon landings vs Earth landings)
The frequently used reasoning for why the re-entry should always be performed at breakneck speeds is that it would take almost as much fuel to slow the craft down as it takes to launch it into orbit, and that's a whole lot of extra weight to carry.
Now correct me if I'm wrong, but the Apollo crafts were, in fact, extremely fast; they reached the Moon and inserted themselves into orbit in mere days; after which the lunar module would detach itself from the mothership and begin a hovering descent to the surface (in the final stage, the descent was vertical). They slowed down sufficiently to make their landings smooth (i.e. from orbital speed to near-zero) and had enough fuel left to get back into orbit afterwards. And this was nearly half-a-century ago.
And yet each time someone returns from orbit these days, they run the risk of getting bounced back into space and getting lost there forever, or burning up before they reach the surface, due to their great speed. And then they use parachutes to perform an uncontrolled splashdown or touchdown wherever, and have to be "rescued" every time. Why?
A:
In the lunar landing case, there's simply no choice; there's no atmosphere available to decelerate the landing craft, so powered landing is the only option. Fortunately, with 1/6 of Earth's gravity, and starting from about 1/5 the speed of Earth orbit, decelerating, hovering, and landing on the moon is much cheaper in fuel -- but we still had to use the largest successful rocket launcher ever built, the Saturn V, to send that fuel mass to the moon.
In the Earth return case, the deceleration is achieved almost for free using atmospheric drag; the mass of parachutes is tiny compared to the mass of fuel that would be required, and even tinier compared with the launch fuel that would be needed to boost the landing fuel into orbit.
In 54 years of manned space flight, no ship has ever gotten "bounced back into space and lost there forever"; only one has "burned up" (Columbia), though other failures during reentry killed Soyuz crews in the early days (1967 and 1971).
For the last 44 years, blunt-capsule reentry and parachute landing has been 100% safe.
Between 1981 and 2011, 99.25% of manned spaceplane reentries from orbit were successful.
It's unlikely that powered rocket landings would prove to be any safer.
A:
The Apollo craft started its trip to the Moon at a high speed. But as it got further away from Earth (climbing out of Earth's gravity well), its speed dropped steadily, and it orbited the Moon at a speed of about 1.5 km/s. That's much lower than Earth orbit (around 8 km/s), which makes a propulsive landing much easier.
A:
Now correct me if I'm wrong, but the Apollo crafts were, in fact, extremely fast; they reached the Moon and inserted themselves into orbit in mere days; after which the lunar module would detach itself from the mothership and begin a hovering descent to the surface (in the final stage, the descent was vertical).
The flight from lunar orbit to the surface was mostly ballistic. The Apollo lander made a small burn (33 meters/second) to depart from that lunar orbit, placing the vehicle on an elliptical orbit whose perilune was slightly inside the Moon. Then it simply fell for about an hour. It was only in the final twelve minutes of descent where the lander expelled a sizable amount of fuel to cancel that orbital velocity.
Even then, the total delta V from lunar orbit to landing was only 1750 meters/second. That's a fraction of the delta V that would be needed to have a vehicle enter the Earth's atmosphere at sub-orbital speeds -- and then the vehicle would still have about 100 km to fall before landing on the Earth.
The only technology currently available to dump the 6750 meters/second (or more) of orbital velocity is to use the Earth's atmosphere to perform most of the braking.
|
[
"stackoverflow",
"0026565237.txt"
] | Q:
Using multiple UIViews instead of multiple UIViewControllers
I've been developing an iOS application for 3 months. I know all MVC stuff, design issues and UIViewController capabilities comparing to those of UIView. However, I cannot find an answer to why using multiple UIViewControllers is a "should-to-do" for multiple screens. For example, instead of using one UITabBarController and one UIViewController for each tab, we can have single UIViewController, which is root view controller, and one UIView for each tab. Then, to provide tab functionality, we can use hidden property of UIView. In other words, for instance, to show the first tab, we set hidden = true of all UIViews (corresponding to a tab) other than UIView corresponding to the first tab.
What is the problem aside from design issues in this approach? Time&memory efficiency, segue animations etc.?
Thanks in advance...
A:
Yes, you can use single controller and multiple views and swap between them when needed. (for simple use cases)
But if the functionality in each tab is complex and unrelated to each other, then its always better to go for multiple controllers.
Suppose you are building a tabbed Twitter application using an available open-source API, with the first tab for the timeline, the second tab for your interactions, the third for the profile, and so on for settings. In this case each tab will have a complex set of functionalities, and it is better to delegate the related tasks to different controllers.
Hope you found an answer!
|
[
"stackoverflow",
"0013590799.txt"
] | Q:
How to import and export multiple data using sas macro?
Hi I am trying to do a macro in sas to the following function
I have multiple sas files(apr12part1, apr12part2,...,aug12part1,aug12part2), for each file(for example:apr12part1) , I will do some process which will give me three output(for example: apr12part1out1,apr12part1out2,pr12part1out3).
I was trying to use a macro, but was not very successful. Can someone help me with it? Thank you very much!
The code I was using is something like this
%macro test(month=,part=);
...FROM EC100002.&month_part_&part
...
proc export data=SASUSER.Out1 outfile='G:\Output\Output1_&month_&part.csv' dbms=csv replace;
proc export data=SASUSER.Out2 outfile='G:\Output\Output2_&month_&part.csv' dbms=csv replace;
proc export data=SASUSER.Out3 outfile='G:\Output\Output3_&month_&part.csv' dbms=csv replace;
run;
%mend test
%test(month=apr12,part=1)
A:
Try adding "dots" to concatenate the macro variables into a complete string. Also, you must use double-quotes, not single-quotes. Something like this:
proc export data=SASUSER.Out1
outfile="G:\Output\Output1_&month._&part..csv" dbms=csv replace;
Note there are two "dots" after your &part variable; the first is a concatenation operator and the second is part of the file name.
You probably have similar issues with the code not displayed, so check that as well.
|
[
"serverfault",
"0000189987.txt"
] | Q:
The feasibility of using a VPN to secure only the admin portion of a site
It has been suggested that the admin section of a custom CMS we use require VPN access to reach. How feasible is this? The site has a front end http://thesite.com and a backend http://thesite.com/admin so how feasible is it to set things up so that anyone accessing the admin areas has to log in via VPN?
I have some experience in setting up an openvpn server (under Ubuntu... this one is under CentOS) but that was for use as a secure proxy for access from remote locations (i.e. fire up the laptop, login to the VPN and surf securely). This situation is different as, in an effort to help secure a too frequently hacked site, the owner has decided a VPN is the solution.
So, is this doable? Completely the wrong direction? We are working on securing this mess of code (and have closed some vulnerabilities) but in the meantime... suggestions/tips/links?
A:
"Doable" by limiting admin applications/access to either local net, loop-back or same IP/net (depending on your specific configuration and requirements), so that the "from" IP will only match those conditions when it is through the VPN.
|
[
"stackoverflow",
"0033056788.txt"
] | Q:
F# set to list with set.fold
I'm programming in F# and I'm trying to make a set into a list by using Set.fold.
What I've done so far:
let list sa = Set.fold (fun se sa -> sa) [];;
but it doesn't seem to make the set into a list, rather it makes a
set of lists into a list.
What change can I make to my code so it won't make a set of lists into a list, but a set into a list.
A:
let toList s = Set.fold (fun l se -> se::l) [] s
1st argument of fold is the function taking two parameters: accumulator of type that should be returned from fold (list in your case), and a single element of collection fold applies to (element of Set in your case). This function should return the type same of accumulator passed as the 1st parameter (list in your case). So the function should add current element of the set to list accumulator.
2nd parameter of fold is the initial accumulator state (empty list in your case).
3rd parameter of fold in your case is the Set you want to transform to list
|
[
"dba.stackexchange",
"0000122533.txt"
] | Q:
Using composite indexes with date ranges
I have a table of rules that are activated depending on the date.
id - name - value - start_date - end_date
1 - ABC - 10 - 2015-12-01 - 2015-12-31
2 - DEF - 15 - 2016-02-01 - 2016-02-29
My SQL queries will mostly be
SELECT * FROM rules WHERE start_date <= '2015-12-05' and end_date >= '2015-12-05';
Does having a composite index consisting of start_date and end_date help or will worsen the query?
Edit: I'm using MySQL
A:
It's hard to tell if this will hurt or improve performance but adding a composite index on start_date and end_date shouldn't worsen your query, the index will either be used or not used. You didn't mention your RDBMS but I don't think there's a big chance your engine's optimizer will pick a worse plan (but there is always a possibility I guess).
That being said, there might be a slight impact on the plan generation time as your optimizer has an extra index to consider, and unused indexes could hurt overall performance since they need to be maintained when updating/inserting.
Whether or not the index will help your query is hard to tell, it depends on the selectivity of your query and cardinality statistics. The optimizer of your RDBMS should pick the fastest way to get your data, and even if an index exists on the columns your query selects on a full scan might still be the better option, especially since you are selecting all fields.
Why is selectivity important?
When an index is used, the index points to the actual record and the record might need to be fetched using the pointer. Most RDBMS work that way.
See for example the Oracle documentation
In general, index access paths should be used for statements that
retrieve a small subset of table rows, while full scans are more
efficient when accessing a large portion of the table
or the SQL Server documentation
In many cases, the optimizer forces a table scan for queries with
result sets of approximately 5 percent, although the table scan
becomes more efficient than index access at selectivities of 8 to 10
percent.
Why is select * important
If you fetch all fields from a table your database engine will have to fetch the actual record using the pointer stored in the index and you lose the option of using included columns (if your RDBMS supports them) or covering indexes.
See for example this article for SQL server (but the principle goes for a lot of vendors): Using Covering Indexes to Improve Query Performance
However, under some circumstances, the overhead associated with
nonclustered indexes may be deemed too great by the query optimizer
and SQL Server will resort to a table scan to resolve the query.
and
The observed improvement is due to the fact that the nonclustered index
contained all of the required information to resolve the query. No Key
Lookups were required.
|
[
"meta.stackoverflow",
"0000257984.txt"
] | Q:
Need Clearance About Old Answers
My account was almost blocked in the past due to answers that I do not remember anything about. I have been asked if it was a network — does that mean my company network? If so, could someone have hacked into my account from my company network and posted those answers? And how can I see those deleted answers?
A:
It is rather unlikely that your account was hacked and the hacker used it to post answers.
But you can view your deleted answers from the past 60 days. Perhaps that will refresh your memory.
Go to your user profile page (click on your user icon at the top of the page).
In the "Answers" section, click on the link that says "view more".
At the bottom of the next page, click on the link that says "deleted recent answers".
You'll end up at this page.
|
[
"stackoverflow",
"0015626837.txt"
] | Q:
Find all iFrames in HTML page and replace them
I'm wanting to use jQuery to find all iFrames in a page, then replace their source.
I'm doing this from a chrome extension, so any way to replace it, then load the iframe (So that the iframe is active on the page view), is what I'm looking for.
Any help is appreciated.
A:
You can select all instances of an element by calling a jQuery selector on the element name:
$('iframe')
And you can set the attribute of any element with the jquery attr() method. To replace all the iframes on a page with google.com you would do this:
$('iframe').attr('src','http://google.com');
--Disclaimer: The following code is an example of something referenced in the comments and is not an anwser to the Original question:
This is how you would iterate each iframe and check its width:
$('iframe').each(function(){
var currentIframeWidth = $(this).width();
//do something with width here
});
|
[
"drupal.stackexchange",
"0000051948.txt"
] | Q:
When should the second drupal_bootstrap() parameter be used?
The parameters accepted from drupal_bootstrap() are two; the second is defined as follows:
$new_phase: A boolean, set to FALSE if calling drupal_bootstrap() from inside a function called from drupal_bootstrap() (recursion).
I am not clear what that means, considering that statistics_exit() calls drupal_bootstrap() using just one argument, thus using the default value (TRUE) for the second one.
hook_exit() is invoked from _drupal_bootstrap_page_cache(), and statistics_exit() should be considered a function called from drupal_bootstrap() that calls drupal_bootstrap().
Yet, statistics_exit() is not using FALSE as second argument of drupal_bootstrap().
The only functions I found that use FALSE as second parameter are module_hook_info(), and _drupal_bootstrap_page_cache(), which contain the following code.
// module_hook_info()
// This function is indirectly invoked from bootstrap_invoke_all(), in which
// case common.inc, subsystems, and modules are not loaded yet, so it does not
// make sense to support hook groups resp. lazy-loaded include files prior to
// full bootstrap.
if (drupal_bootstrap(NULL, FALSE) != DRUPAL_BOOTSTRAP_FULL) {
return array();
}
// _drupal_bootstrap_page_cache()
// Check for a cache mode force from settings.php.
if (variable_get('page_cache_without_database')) {
$cache_enabled = TRUE;
}
else {
drupal_bootstrap(DRUPAL_BOOTSTRAP_VARIABLES, FALSE);
$cache_enabled = variable_get('cache');
}
From the code of the first function I could understand I should use FALSE when I need to get the current bootstrap phase, and in that case I would use NULL as first argument.
From the code of the second function, I could understand that the second parameter is an internal parameter, I should not pass in my functions, as the only case a function of mine is being called from drupal_bootstrap() (also indirectly, through a function that is called from drupal_bootstrap()) is when I implement hook_boot(), or hook_exit().
Did I misunderstand what said in the drupal_bootstrap() documentation? If it so, when exactly should a call to drupal_bootstrap() use a value for the second parameter that is different from its default value?
A:
Without looking more closely, I can't say why statistics_exit() doesn't use the second parameter, but to answer your question...
The drupal_bootstrap() function only ever needs to be called with $new_phase = FALSE to prevent an infinite loop inside the core bootstrap code. Unless you're modifying core, you'll always want to leave the second parameter at its default value (TRUE).
The key is in this part of drupal_bootstrap():
// When not recursing, store the phase name so it's not forgotten while
// recursing.
if ($new_phase) {
$final_phase = $phase;
}
Calls to drupal_bootstrap() outside the bootstrap process always specify the final phase to bootstrap to (or only get the current phase, in which case, the second parameter doesn't matter). When you call drupal_boostrap(), you'll be initiating the bootstrap process and setting the static $final_phase variable, so you'll need $new_phase = TRUE.
|
[
"stackoverflow",
"0006190723.txt"
] | Q:
SQL Server 2005 - how to compare field value, and return a count if different, for every occurance
DECLARE @CURRENTSCHOOL TABLE (STUDENT VARCHAR(8), COURSE VARCHAR(8), SCHOOL VARCHAR(2))
INSERT INTO @CURRENTSCHOOL VALUES ('10000000','MCR1010','11')
INSERT INTO @CURRENTSCHOOL VALUES ('12000000','MCR6080','11')
INSERT INTO @CURRENTSCHOOL VALUES ('13000000','MCR6090','15')
DECLARE @OTHERSCHOOLS TABLE (STUDENT VARCHAR(8), COURSE VARCHAR(8), SCHOOL VARCHAR(2))
INSERT INTO @OTHERSCHOOLS VALUES ('10000000','MCR1010','11')
INSERT INTO @OTHERSCHOOLS VALUES ('10000000','MCR1011','14')
INSERT INTO @OTHERSCHOOLS VALUES ('10000000','MCR1012','15')
INSERT INTO @OTHERSCHOOLS VALUES ('12000000','MCR6080','19')
INSERT INTO @OTHERSCHOOLS VALUES ('13000000','MCR6090','15')
For the above sample data. Two tables. Currentschool and Otherschools.
Currentschool is the current course that a student is on including the schoolcode,
and is the main table.
OtherSchools is potentially other courses that a student can go on, in differing schools.
I need to compare the currentschool table against the otherschools table matched using the student id number, and for every different schoolcode in otherschools, it needs to return a count.
eg:
Student: OtherSchoolCount:
10000000        2 (because of 2 different school codes than the current school)
12000000        1 (because of 1 different school code than the current school)
13000000 blank (because not a different school code)
Is this possible?
Many thanks
M.
A:
select o.Student, count(*) as Count
from CURRENTSCHOOL c1
inner join OTHERSCHOOLS o on c1.Student = o.Student --this join is to ensure student exists in both tables
left outer join CURRENTSCHOOL c on o.Student = c.Student
and o.School= c.School
where c.Student is null
group by o.Student
A:
SELECT cs.student,
COUNT(os.course)
FROM @CURRENTSCHOOL cs
LEFT JOIN @OTHERSCHOOLS os
ON cs.student = os.student
AND cs.school <> os.school
GROUP BY cs.student
outputs
STUDENT
-------- -----------
10000000 2
12000000 1
13000000 0
If Null is really preferred over Zero then you can do this (or use the equivalent CTE)
SELECT student,
CASE
WHEN coursecount = 0 THEN NULL
ELSE coursecount
END coursecount
FROM (SELECT cs.student,
COUNT(os.course) coursecount
FROM @CURRENTSCHOOL cs
LEFT JOIN @OTHERSCHOOLS os
ON cs.student = os.student
AND cs.school <> os.school
GROUP BY cs.student) t
Which outputs
student courseCount
-------- -----------
10000000 2
12000000 1
13000000 NULL
Update: NullIF could be put to use as alternative to the Case statement see What applications are there for NULLIF()?
|
[
"stackoverflow",
"0053102577.txt"
] | Q:
Classifying negative and positive words in large files?
I am trying to get the count of positive and negative in a very large file. I only need a primitive approach(that does not take ages). I have tried sentiwordnet but keep getting a IndexError: list index out of range, which I think it's due to the words not being listed in wordnet dictionary. The text contains a lot of typos and 'non-words'.
If someone could give any suggestion, I would be very grateful!
A:
It all depends on what your data is like and what is the final objective of your task. You need to give us a little bit more detailed description of your project but, in general, here are your options:
- Make your own sentiment analysis dictionary: I really doubt this is what you want to do since it takes a lots of time and effort but if your data is simple enough it's doable.
- Clean your data: if your tokens aren't in senti-wordnet because there's too much noise and badly spelled words, then try to correct them before passing them through wordnet, it will at least limit the number of errors you'll get.
- Use a senti-wordnet alternative: accorded, there aren't that many good ones but you can always try sentiment_classifier or nltk's sentiment if you're using python (which by the looks of your error seems like you are).
- Classify only what you can: this is what I would recommend. If the word is not in senti-wordnet, then move on to the next one. Just catch the error (try: ... except IndexError: pass) and try to infer what the general sentiment of the data is by counting the sentiment words you actually catch.
PS: We would need to see your code to be sure but I think there's another reason why you're getting an IndexError. If the word was not in senti-wordnet you would be getting a KeyError, but it also depends on how you coded your function.
Good luck and I hope it was helpful.
|
[
"stackoverflow",
"0008496212.txt"
] | Q:
Node.js fs.unlink function causes EPERM error
I'm using fs.unlink() to delete a file and I receive the following error:
uncaught undefined:
Error: EPERM, Operation not permitted '/Path/To/File'
Anyone know why this is happening?
A:
You cannot delete a directory that is not empty.
And fs.unlinkSync() is used to delete a file not a folder.
To remove an empty folder, use
fs.rmdir()
to delete a non empty folder, use this snippet:
var deleteFolderRecursive = function(path) {
if( fs.existsSync(path) ) {
fs.readdirSync(path).forEach(function(file) {
var curPath = path + "/" + file;
if(fs.lstatSync(curPath).isDirectory()) { // recurse
deleteFolderRecursive(curPath);
} else { // delete file
fs.unlinkSync(curPath);
}
});
fs.rmdirSync(path);
}
};
Snippet from stackoverflow: Is node.js rmdir recursive ? Will it work on non empty directories?
A:
If you want to achieve something like rm -rf does, there is a package from npm called rimraf which makes it very easy.
|
[
"stackoverflow",
"0020017637.txt"
] | Q:
Make image positioned on another container
My HTML is like this
<div class="container-panel">
<div class="inner-bannerbg">
</div>
<div class="home-second-holder">
<div class="wrapper clearfix">
<ul class="service-menu">
<li>
<a href="/qw/privat/" class="inner-nav inner-nav01 active">Privat</a>
<span class="arrow01"></span>
</li>
<li>
<a href="/qw/lorem/" class="inner-nav inner-nav02">Lorem</a>
</li>
<li>
<a href="/qw/some/" class="inner-nav inner-nav03">some</a>
</li>
<li>
<a href="/qw/numquam/" class="inner-nav inner-nav04">numquam</a>
</li>
<li>
<a href="/loesninger/ducimus/" class="inner-nav inner-nav05">ducimus</a>
</li>
<li>
<a href="/qw/2/" class="inner-nav inner-nav06">2</a>
</li>
<li>
<a href="/qw/4/" class="inner-nav inner-nav07">4</a>
</li>
</ul>
</div>
</div>
<div class="private-block">
</div>
</div>
As you can see there is a span inside first li .And an arrow image is attached to this span .
CSS for this span
ul.service-menu li span.arrow01 {
position: absolute;
width: 100%;
height: 44px;
background: url(../Images/active-arrow01.png) no-repeat center bottom;
left: 0;
bottom: -161px;
}
Using this css I have arranged arrow to come direct under current active li . and position right above my private-block div.This positioning completely relying on my CSS
bottom: -161px;
The problem is that as the number of li is dynamic some times an extra row may come and break my style.If number of li grows there will be extra rows present and that will make this arrow align wrong.To get more in to this I will post the screen here
Can anyone help me to make CSS that will make the arrow reside on top of the private-block div, nevertheless how many rows there are?
A:
Define the arrow as the pseudo element of the .wrapper. Don't forget to set position: relative for the .wrapper:
.wrapper {
position: relative;
}
.wrapper::after {
position: absolute;
width: 100%;
height: 44px;
content: url(../Images/active-arrow01.png); /* notice that the image becomes the "content" of the pseudo-element, not the background */
left: 0;
bottom: 0; /* the arrow will always be at the bottom of teh .wrapper */
}
|
[
"magento.stackexchange",
"0000267864.txt"
] | Q:
move minicart before top.search magento 2.2
I have moved the mini cart before top.search using below code in my theme under
app/design/frontend/Mytheme/luma_child/Magento_Checkout/layout/default.xml
<?xml version="1.0"?>
<page xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="urn:magento:framework:View/Layout/etc/page_configuration.xsd">
<body>
<referenceContainer name="header-wrapper">
<block class="Magento\Checkout\Block\Cart\Sidebar" name="minicart" as="minicart" after="top.search" template="Magento_Checkout::cart/minicart.phtml">
-------rest of the code ---------
</referenceContainer>
</body>
</page>
This code moves minicart before top.search successfully but it is breaking the minicart item rendering block
giving error in console like below
Uncaught TypeError: Unable to process binding "if: function(){return
getCartParam('summary_count') }"
Message: Unable to process binding "foreach: function(){return {
data:getCartItems(),as:'item'} }"
Message: Unable to process binding "foreach: function(){return $parent.getRegion($parent.getItemRenderer(item.product_type)) }"
Message: Cannot read property 'simple' of undefined
at UiClass.getItemRenderer (minicart.js:138)
at foreach (eval at createBindingsStringEvaluator (knockout.js:2624), <anonymous>:3:93)
at knockout.js:3889
at Object.init (knockout.js:5023)
at init (knockout.js:3914)
at knockout.js:2989
at Object.ignore (knockout.js:1249)
at knockout.js:2988
at Object.arrayForEach (knockout.js:151)
at applyBindingsToNodeInternal (knockout.js:2974)
Can anyone suggest how to move it properly? Or do I need to make any adjustment in minicart.js?
A:
Try this,
Remove your above code and add this code in default.xml
<?xml version="1.0"?>
<page xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="urn:magento:framework:View/Layout/etc/page_configuration.xsd">
<body>
<move element="minicart" destination="header-wrapper" after="top.search"/>
</body>
</page>
Hope this will work out for you :)
|
[
"stackoverflow",
"0017288893.txt"
] | Q:
Mongodb push to array but keep the array with a max of 3 items
Is there an easy way to push to an array in mongodb such that the array has no more than 3 items. If there are 3 items the push should throw out the first item in the array. I'm looking for a safe way to do this.
A:
@muistooshort is correct.
The 2.4 $push, $each and $slice operators can be used to create a fixed length array.
You can then use the $sort operator to keep the array sorted and have the "last" item removed by the $push.
The documentation here has a great example.
|
[
"dba.stackexchange",
"0000252462.txt"
] | Q:
How to restore a database with innodb tables from files?
I am trying to restore a MariaDB database containing innodb tables from files. Blue-eyed as I am, I considered this an easy task: I just stopped the server, copied the files and restarted it as follows:
# Stop MariaDB server
systemctl stop mariadb.service
# Copied database
sudo rsync --numeric-ids -aHAEXv --rsh="ssh" user@mybackupserver:/xyz/mysql/owncloud /var/lib/
# Start MariaDB server
systemctl start mariadb.service
Unfortunately this does not work as expected. Although the database and all it's tables are there, I can't access any table. Trying to do so I get:
1932 - Table 'owncloud.oc_activity_mq' doesn't exist in engine.
I than tried to create an empty database of the same name before I repeated all steps above, but I got the same error.
Oddly in PHPMyAdmin all tables have the Collation in use.
I checked permissions and ownership of the directory a dozen times, everything within my MariaDB data directory is owned by the user mysql and every file has the permission 0660 every directory 0700…
My question is, is it possible to restore a single database with innodb tables directly from an old data directory to a new one? And if it is, how?
I know this question came up before, for example here: How to restore MySQL database from Files
But I am asking here specifically for a database with innodb engine, as off what I read, I got the impression the problems I ran in, might have to do with the engine
A:
With innodb files you need to restore the entire database server to a new instance.Use mysqldump oc_activity_mq to extract a SQL version of the table out of that backup. And then import this into your active instance.
This assumes you've made consistent backup. If mysql was running and being updated at the same time it was copied it might have some form of corruption.
Revist your backup strategy using innodbbackup, lvm, and/or mysqldump/pump.
|
[
"serverfault",
"0000970247.txt"
] | Q:
centos 7 execute remote script from local
I'm working on a script to sync a folder from Windows to a Linux server with Rsync and I manage to get it work with git bash and the rsync executable.
In Centso I added the syncinguser to the sudoer -> syncinguser ALL=NOPASSWD:/usr/bin/rsync to allow the execution of Rsync, how can I allow the same user to run a .sh file to complete the sync process or a php command?
I want to sync a Laravel application so I need to run the command php artisan cache:clear and php artisan config:cache after the sync has finished.
I would also like to change owner:group like chown -R user:psacln * but I'm quite happy with the fact that doesn't change the rights on the folders E.g: 0777 permission on the public folder.
This is my script in windows so far:
rsync -rt --chown=user:group (being ignored I think because user's permissions)
--exclude ".env" --exclude "storage" --exclude "public/storage" --progress
--rsync-path="sudo rsync" -e "ssh -p <port>"
'/c/path/to/local/folder/' syncinguser@<ip address>:/remote/path/
how can I add something like this after the sync:
ssh yncinguser@<ip address> php artisan cache:clear
ssh syncinguser@<ip address> php artisan config:cache
// OR
ssh syncinguser@<ip address>:/remote/path/to/completesync.sh
where completesync.sh will run the 2 php artisan command and apply recursively the owner:group of all files/folders as when they come from windows they are all root:root
I cannot connect as a root because we blocked it, you need to connect as a normal user and then switch to root.
ssh -p <ssh port> syncinguser@<IP ADDRESSED> '/path/to/script.sh'
permissions denied, same if I connect as a normal user
A:
OK, so you need two scripts: one on the source machine, and another one on the destination machine. The script on the source machine should be something like:
rsync ... ... ... syncuser@destination:/dest/path
ssh syncuser@destination "sudo /some/path/to/completesync.sh"
And on the destination machine, the script /some/path/to/completesync.sh which contains something like this:
#!/bin/sh
php artisan cache:clear
php artisan config:cache
# whatever else you need to run as root
Be careful to have restricted rights on this script:
chown root:root /path/to/completesync.sh && chmod 700 /path/to/completesync.sh
Last, modify /etc/sudoers on the destination machine so that "syncuser" can run both rsync and your script as root:
syncuser ALL=NOPASSWD: /usr/bin/rsync, /path/to/completesync.sh
Now running the script on the source machine should complete the whole process in one operation.
|
[
"stackoverflow",
"0051577282.txt"
] | Q:
How do I load custom image based datasets into Pytorch for use with a CNN?
I have searched for hours on the internet to find a good solution to my issue. Here is some relevant background information to help you answer my question.
This is my first ever deep learning project and I have no idea what I am doing. I know the theory but not the practical elements.
The data that I am using can be found on kaggle at this link:
(https://www.kaggle.com/alxmamaev/flowers-recognition)
I am aiming to classify flowers based on the images provided in the dataset using a CNN.
Here is some sample code I have tried to use to load data in so far, this is my best attempt but as I mentioned I am clueless and Pytorch docs didn't offer much help that I could understand at my level.
(https://pastebin.com/fNLVW1UW)
# Loads the images for use with the CNN.
def load_images(image_size=32, batch_size=64, root="../images"):
transform = transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_set = datasets.ImageFolder(root=root, train=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=2)
return train_loader
# Defining variables for use with the CNN.
classes = ('daisy', 'dandelion', 'rose', 'sunflower', 'tulip')
train_loader_data = load_images()
# Training samples.
n_training_samples = 3394
train_sampler = SubsetRandomSampler(np.arange(n_training_samples, dtype=np.int64))
# Validation samples.
n_val_samples = 424
val_sampler = SubsetRandomSampler(np.arange(n_training_samples, n_training_samples + n_val_samples, dtype=np.int64))
# Test samples.
n_test_samples = 424
test_sampler = SubsetRandomSampler(np.arange(n_test_samples, dtype=np.int64))
Here are my direct questions that I require answers too:
How do I fix my code to load in the dataset in an 80/10/10 split for training/test/validation?
How do I create the required labels/classes for these images which are already divided by folders in /images ?
A:
Looking at the data from Kaggle and your code, there are problems in your data loading.
The data should be in a different folder per class label for PyTorch ImageFolder to load it correctly. In your case, since all the training data is in the same folder, PyTorch is loading it as one train set. You can correct this by using a folder structure like - train/daisy, train/dandelion, test/daisy, test/dandelion and then passing the train and the test folder to the train and test ImageFolder respectively. Just change the folder structure and you should be good. Take a look at the official documentation of torchvision.datasets.Imagefolder which has a similar example.
As you said, these images which are already divided by folders in /images. PyTorch ImageFolder assumes that images are organized in the following way. But this folder structure is only correct if you are using all the images for train set:
```
/images/daisy/100080576_f52e8ee070_n.jpg
/images/daisy/10140303196_b88d3d6cec.jpg
.
.
.
/images/dandelion/10043234166_e6dd915111_n.jpg
/images/dandelion/10200780773_c6051a7d71_n.jpg
```
where 'daisy', 'dandelion' etc. are class labels.
The correct folder structure if you want to split the dataset into train and test set in your case (note that I know you want to split the dataset into train, validation, and test set, but it doesn't matter as this is just an example to get the idea out):
```
/images/train/daisy/100080576_f52e8ee070_n.jpg
/images/train/daisy/10140303196_b88d3d6cec.jpg
.
.
/images/train/dandelion/10043234166_e6dd915111_n.jpg
/images/train/dandelion/10200780773_c6051a7d71_n.jpg
.
.
/images/test/daisy/300080576_f52e8ee070_n.jpg
/images/test/daisy/95140303196_b88d3d6cec.jpg
.
.
/images/test/dandelion/32143234166_e6dd915111_n.jpg
/images/test/dandelion/65200780773_c6051a7d71_n.jpg
```
Then, you can refer to the following full code example on how to write a dataloader:
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data as data
import torchvision
from torchvision import transforms
EPOCHS = 2
BATCH_SIZE = 10
LEARNING_RATE = 0.003
TRAIN_DATA_PATH = "./images/train/"
TEST_DATA_PATH = "./images/test/"
TRANSFORM_IMG = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(256),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225] )
])
train_data = torchvision.datasets.ImageFolder(root=TRAIN_DATA_PATH, transform=TRANSFORM_IMG)
train_data_loader = data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
test_data = torchvision.datasets.ImageFolder(root=TEST_DATA_PATH, transform=TRANSFORM_IMG)
test_data_loader = data.DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
class CNN(nn.Module):
# omitted...
if __name__ == '__main__':
print("Number of train samples: ", len(train_data))
print("Number of test samples: ", len(test_data))
print("Detected Classes are: ", train_data.class_to_idx) # classes are detected by folder structure
model = CNN()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
loss_func = nn.CrossEntropyLoss()
# Training and Testing
for epoch in range(EPOCHS):
for step, (x, y) in enumerate(train_data_loader):
b_x = Variable(x) # batch x (image)
b_y = Variable(y) # batch y (target)
output = model(b_x)[0]
loss = loss_func(output, b_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step % 50 == 0:
test_x = Variable(test_data_loader)
test_output, last_layer = model(test_x)
pred_y = torch.max(test_output, 1)[1].data.squeeze()
accuracy = sum(pred_y == test_y) / float(test_y.size(0))
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data[0], '| test accuracy: %.2f' % accuracy)
|
[
"stackoverflow",
"0043126207.txt"
] | Q:
Android/Xamarin: How to launch dialog with broadcast receiver
I want my app to listen for intends broadcasted by the call application, and when a call intend is broadcasted for a specific number I want to launch a dialog. I read that "A broadcast receiver may not display dialogs, and it is strongly discouraged to start an activity from within a broadcast receiver" https://developer.xamarin.com/guides/android/application_fundamentals/broadcast-receivers/ so I am assuming I should instead make the broadcast receiver launch a service that then launches a dialog. Can anyone confirm this? Also any simplified examples would be highly appreciated
Thanks!
A:
In the example below, the app uses a BroadcastReceiver to detect a phone call number and decide whether it should answer or not:
How to reject incoming call programatically in android?
So using a BroadcastReceiver for that isn't that bad.
If you just want to show information about the phone call, you can display an Notification, as suggested by Jon Douglas in the comments. Displaying Dialogs from BroadcastReceivers isn't allowed (also disencouraged).
|
[
"stackoverflow",
"0003878815.txt"
] | Q:
How to obtain the current time differences between two timezones
I want to calculate the current time differences between US/Central timezone and British Summer Time. I mean, currently these both timezones have daylight savings going on, so they have a 6 hours time difference. But after Sunday October 31 2010, daylight savings will be off for British summer time, at which moment there will be a 5 hours time differences between these two timezones.
Is there any way I can calculate these varying time differences?
A:
Just to provide some concrete code for the answers given, here's some code to work out the current difference between me (in London) and my colleagues in Mountain View:
using System;
class Test
{
static void Main()
{
var london = TimeZoneInfo.FindSystemTimeZoneById
("GMT Standard Time");
var googleplex = TimeZoneInfo.FindSystemTimeZoneById
("Pacific Standard Time");
var now = DateTimeOffset.UtcNow;
TimeSpan londonOffset = london.GetUtcOffset(now);
TimeSpan googleplexOffset = googleplex.GetUtcOffset(now);
TimeSpan difference = londonOffset - googleplexOffset;
Console.WriteLine(difference);
}
}
A:
You can create two datetime object from different timezones, a good example: Creating a DateTime in a specific Time Zone in c# fx 3.5
And calculate the Delta between them.
|
[
"stackoverflow",
"0023329586.txt"
] | Q:
Why am I getting an Ada error?
This is, as you could tell, a programming assignment for a class. It's far over due and I'm not getting any points however there is a test coming up soon and I'd rather like to know how to use the specific functions in ADA. The program is different now, when I run it with test put statements in the initial procedure, GetStudent, it will output them fine. However now it goes to the bottom, line 96, and gets an end error
with Ada.Text_IO;
use Ada.Text_IO;
with Ada.Integer_Text_IO;
USE Ada.Integer_Text_IO;
WITH Ada.Float_Text_IO;
USE Ada.Float_Text_IO;
procedure StudentFileLab is
------------------------------------------------------------------------
--| This program stores records in an array, reading them in from a file
--| and writing them out.
------------------------------------------------------------------------
subtype NameType is String(1..30);
subtype IDType is natural range 0..9999;
subtype GPAType is float range 0.0..4.0;
type StudentRecord is record
ID : IDType;
GPA : GPAType;
Name : NameType := (others => ' ');
end record;
subtype StudentIndex is integer range 1..100;
TYPE StudentArrayType IS ARRAY (StudentIndex) OF StudentRecord;
-- Specification of input procedure. You are to write the body.
PROCEDURE GetStudent (File: IN File_Type; Student : OUT StudentRecord) is
Length : Integer;
BEGIN
For I In 0..Length Loop
Get(File, Student.ID);
For J In 0..Length Loop
Get(File, Student.GPA);
For K In 0..Length Loop
Get_Line (File, Student.Name, Length);
End Loop;
END LOOP;
End LOOP;
END GetStudent;
PROCEDURE PutStudent (Student : IN StudentRecord) IS
BEGIN
FOR Studentlist IN StudentIndex LOOP
Put (Student.ID,
width => 0);
Put (Item => " ");
Put (Student.GPA,
Exp => 0,
Fore=> 0,
Aft => 0);
Put (Item => ", ");
Put (Student.Name);
New_Line;
END LOOP;
END PutStudent;
StudentList : StudentArrayType; -- our array of students
CurrentIndex : Natural := 0; -- the index to the current array item
CurrentStudent : StudentRecord; -- the current student
Filename : String(1..30); -- name of the data file
Flength : Integer; -- length of the name of the data file
Infile : File_Type;
begin -- StudentLab
Put_Line(Item=>"Welcome to the Student Information Program.");
Put_Line(Item=>"Please enter the student filename: ");
Get_Line(Item=> Filename, Last => Flength);
Open(File => Infile, Mode => In_File, Name => Filename(1..Flength));
loop
-- Get the next student
GetStudent(File=> infile, Student => CurrentStudent);
exit when CurrentStudent.Id = 0;
CurrentIndex := CurrentIndex + 1;
StudentList(CurrentIndex) := CurrentStudent;
END LOOP;
close(file=>infile); -- close the data file after all data is read.
-- Output the header for the nicely formatted output.
FOR Index IN 1..CurrentIndex loop
PutStudent(Student => StudentList(Index));
end loop;
end StudentFileLab;
The Program is supposed to read from a file that looks like this.
1435 3.75 Jane Smith
2233 2.94 Robert Robertson
9634 3.86 Jennie Diver
4325 3.42 Matt Pratt
0
So, Line 96 is literally the end loop line.
FOR Index IN 1..CurrentIndex loop
PutStudent(Student => StudentList(Index));
-----> end loop;
I may be wrong but I feel like my main problem is now with the body of PutStudent seen here:
FOR Studentlist IN StudentIndex LOOP
Put (Student.ID,
width => 0);
Put (Item => " ");
Put (Student.GPA,
Exp => 0,
Fore=> 0,
Aft => 0);
Put (Item => ", ");
Put (Student.Name);
New_Line;
END LOOP;
I feel like it's the for line but I can't tell how to fix it.
A:
You aren’t getting End_Error at line 96 of your program, but in the runtime library.
When I run your program, I get
raised ADA.IO_EXCEPTIONS.END_ERROR : a-tigeli.adb:96
which is in fact in Ada.Text_IO.Get_Line.
When I compile your program with all warnings on, I get
studentfilelab.adb:35:19: warning: "Length" may be referenced before it has a value
and, looking at the code, this is
PROCEDURE GetStudent (File: IN File_Type; Student : OUT StudentRecord) is
Length : Integer;
BEGIN
For I In 0..Length Loop <<<<<<<< line 35
Get(File, Student.ID);
For J In 0..Length Loop
Get(File, Student.GPA);
For K In 0..Length Loop
Get_Line (File, Student.Name, Length);
End Loop;
END LOOP;
End LOOP;
END GetStudent;
So, firstly, you would need to set a value for Length; but, more importantly, this procedure is supposed (judging by its name and the context) to read the data for one student, so what are you doing looping in the first place?
GetStudent needs work even after this (you mustn’t try to read anything after the 0 that ends the input data) and I think there are more problems, but that should do to be going on with.
|
[
"math.stackexchange",
"0003516176.txt"
] | Q:
How do I refer to absolute change in 2 percentages?
I'm writing a paper and I have 45% WER (word error rate) under one condition. Under a different condition, that improves to a 34% WER. It seems incorrect to say that I had an 11% improvement in WER. What would be the correct way to state this?
A:
You can say, that:
The error has decreased by $11$ percentage points
or
The error has decreased by $24.4\%$
|
[
"diy.stackexchange",
"0000114436.txt"
] | Q:
Dehumidifier with multiple float switches
I just bought a dehumidifier for my soon to be finished basement. Based on the basement layout I also got a condensate pump so I can pump up the water to drain it above the dehumidifier. The dehumidifier has wiring terminals for a normally closed float switch (which the condensate pump has built in) that will shut off the dehumidifier if there is a problem. I was hoping to put the whole setup in a condensate pan in case something goes wrong with the dehumidifier/drain (is this overkill?). The installation manual says:
Install a condensate overflow safety switch (i.e. float switch) in the condensate pan, remove the factory installed jumper wire between the Float Switch terminals on the control and wire the float switch to the dehumidifier as shown in figure 10. Overflow safety switches on condensate pumps can be wired to the Float Switch terminals in a similar fashion.
Which to my naive reading sounds like they are suggesting putting the switches in parallel. Am I correct that since the switches are normally closed, they need to go in series?
A:
Your instructions are basically telling you this:
Remove the JUMPER from Float Switch terminals.
Wire the Float Switch to the terminals (you should have two wires - one wire goes to one terminal the other wire to the other terminal) Does not matter which wire just that they are wired one to one terminal and the other wire to the other terminal.
The Switch makes the connection or breaks the connection. Considering it is Jumpered you want the Switch to open when the overflow level is reached.
To Answer your question about series or parallel:
Both Switches Normally Closed in Series.
A AND B = ON
A Closed / B Open = OFF ,
A Open / B Closed = OFF ,
A Closed / B Closed = ON ,
A Open/ B Open = OFF.
Both Switches Normally Closed in Parallel.
A OR B = ON
A Closed / B Open = ON,
A Open / B Closed = ON,
A Closed / B Closed = ON ,
A Open/ B Open = OFF.
|
[
"stackoverflow",
"0055645819.txt"
] | Q:
"TypeError: Cannot read property 'content' of null" Running in Jest
I have a problem if I run my jest test suite.
Site note, I'm using vue-test-utils.
My vue data looking like this:
data() {
return {
token: document.head.querySelector('meta[name="csrf-token"]'),
};
},
My element is this:
<input
type="hidden"
name="_token"
:value="token.content"
>
If I run this test suite:
beforeEach(() => {
const csrfToken = 'mocked-csrf-token';
document.head.innerHTML = `<meta name="csrf-token" content="${csrfToken}">`;
wrapper = shallowMount(NavbarDropdownProfileFooter, { attachToDocument: true });
});
I get this error:
TypeError: Cannot read property 'content' of null
A:
Your issue lies at the selection of the meta element. Your selection results in null, therefore you get the described TypeError. meta elements are not special. Remove accessing the head prop.
document.querySelector('meta[name="csrf-token"]')
|
[
"stackoverflow",
"0041447288.txt"
] | Q:
how to import highcharts offline-exporting in typescript
Tried out the following configurations, but they don't seem to work.
import * as Highcharts from 'highcharts/highstock';
/*import * as HighchartsExporting from 'highcharts/modules/exporting';
HighchartsExporting(Highcharts);*/
require('highcharts/modules/offline-exporting')(Highcharts);
get the following error:
Cannot invoke an expression whose type lacks a call signature. Type '{}' has no compatible call signatures.
anyone knows how to get this working.
A:
here is a solution
import * as Highcharts from 'highcharts/highstock';
import * as HighchartsExporting from 'highcharts/modules/exporting';
HighchartsExporting(Highcharts);
this is good enough to work, a separate require command is not required.
|
[
"stackoverflow",
"0007352016.txt"
] | Q:
Problem stopping an auto bot on my registration form
Auto bot is registering dummy users to my website. Today, I have implemented Captcha with additions of two numbers and store the result in SESSION variable. Only human can understand it and proceed with registration process. But I'm really confused that same bot is parsing the SESSION variable and entering correct value of addition of two numbers and get registered in the website with dummy user.
Is there any other feasible method to stop this bot from registering dummy users?
I'm using Zen cart.
A:
Try using reCaptcha. It is most likely a more advanced solution to your CAPTCHA issue.
|
[
"stackoverflow",
"0045947222.txt"
] | Q:
DJANGO Queryset with datetime: Need to get all future dated entries
How would I get all the entries in the model Appointment with a future date using datetime?
The attribute in question is date.
Appointment.object.filter(date = ???? )
A:
You can use the gte (greater than or equal to) lookup:
>>> from datetime import datetime
>>> today = datetime.today()
>>> Appointment.object.filter(date__gte=today)
|
[
"stackoverflow",
"0038286015.txt"
] | Q:
How to prevent WebStorm IDE from opening '.html' files in a web browser on double-clicking it?
I am trying to name a HTML file as index.html in the WebStorm IDE. However, I am unable to open this file to edit it if I name it index - it directly opens in the browser.
I can open other .html files in WebStorm for editing if they are named something other than index.html.
If I go to Refactor > Rename and change the name of index.html file to something else, I am again not able to edit it.
It seems like there must be a setting for this somewhere but I cannot figure out what or where.
A:
Most likely you have told IDE to open it like that in the past (on purpose or by mistake) .. so it keeps doing it.
Settings/Preferences | Editor | File Types
Find Files Opened in Associated Application entry in top list
Find and remove offending entry from bottom list (most likely will be index.html or similar)
|
[
"stackoverflow",
"0018233946.txt"
] | Q:
Websites rich with exercices or explanation for SML?
I know that I need to go through Google to search, etc. I did, but the results don't offer what I need: 90% of these sites give the same example and sometimes even the same explanation. If there is someone who has, for example, links from a university or school, or maybe something in their Dropbox, I would be very thankful.
A:
Unfortunately it seems that the best resources for learning Standard ML are various paper books (e.g. Introduction to Programming using SML by Hansen & Rischel, ML for the Working Programmer by Larry C. Paulson, or Programming in Standard ML by Robert Harper (the last one is available for free online), as well as resource material from various university courses of which none I have encountered excels.
Here is a number of exam assignments from one course using Standard ML:
|
[
"stackoverflow",
"0026394883.txt"
] | Q:
Useful alternative uses for yield beyond being an anonymous method?
Example:
a = Proc.new do
b = 'hey'
end
def a_method
yield
end
a_method(&a) #=> 'hey'
I understand that yield (or block.call) can be used as a simple co-routine, but I was wondering, are there any more (practical) uses for it beyond simply getting the return value from it? Can we get some of the local variables from the proc into the main method etc.?
A:
If you're not picky about the return value of the block, you can do it using binding.
a = Proc.new do
b = 'hey'
binding
end
def a_method
new_binding = yield
p new_binding.eval("b")
end
a_method(&a)
# => "hey"
If you're using a version of Ruby >= 2.1.0, you can avoid the ickiness of eval by using local_variable_get instead:
p new_binding.local_variable_get(:b)
|
[
"math.stackexchange",
"0000865397.txt"
] | Q:
Does $E[XY|Z]=E[XY]$?
Assume $E[X|Z]=E[X]$. Assume $Y$ and $Z$ are independent. Does $E[XY|Z]=E[XY]$? Can you prove it?
My intuition says $E[XY|Z]=E[XY]$ but expanding the expectations into integrals I couldn't prove it.
A:
No, it is not true. We will find a counterexample using a suggestion by @Did to show that $E[XY|Z] \neq E[XY]$.
Let $X=UZ$ and $Y=U$ with $U \sim Unif(-1,1)$ independent of Z, then $Y$ and $Z$ are independent and $E[X|Z]=E[UZ|Z]=ZE[U]=0$. But, $E[XY|Z]=E[ZU^2|Z]=ZE[U^2]=Z/3$ and $E[XY]=E[ZU^2]=E[Z]/3$.
|
[
"russian.stackexchange",
"0000000382.txt"
] | Q:
What are the common inflection mistakes that Russian kids make?
What are the common inflection mistakes that Russian kids make as they progress in mastering their first language?
A:
It's typical of children (of Russian children, at least) to experiment with morphology. Korney Chukovsky wrote a book 'from 2 to 5' about his experience of childern from 2 to 5 understanding the undercurrent rules of Russian. It states such tendencies as
experimenting (- Дай другую логу! - логу instead of ложку. In fact, why not :) ? But we never say so, of course).
creating one's own etymology: Паутина (cobweb) -> паукина (as it has a spider, or паук there).
Widening the semantics: - Облака-то сняли! Сняли с неба облака! (we Russians don't 'take off' clouds from the sky).
I guess that's about it but the examples are fantastic! If you're interested in the book, you can find it on http://www.chukfamily.ru/Kornei/Prosa/Ot2do5/Ot2do5.htm. If you don't understand the Russian there, you can mail me to [email protected]. I've read the book and enjoyed it and I hope that you will enjoy it too!
In fact, as a linguist, I had lectures on children's language. So feel free to torture me with your curiosity :)))
|
[
"stackoverflow",
"0018120315.txt"
] | Q:
.NET HTTPClient Asynchronous Limitations
I have a small .Net 4.5 C# app which reads information from a data source and then pushes this information to a web site which is a .NET 4.5 Web API site with a simple controller. The controller receives the data and puts it into a database.
The following works for me, as fast as the application can read it can write and everything ends up in the DB:
public static void PostDataToWebApi(MyDataClass tData)
{
HttpResponseMessage s = null;
try
{
s = client.PostAsJsonAsync("/api/Station/Collector", tData).Result;
s.EnsureSuccessStatusCode();
}
catch (Exception e)
{
Console.WriteLine("ERROR (ClientPost): " + e.ToString());
}
}
The following does NOT work. It POSTs about a thousand-odd records and then comes up with a number of errors all with the message "a task was canceled", but then after about 10 seconds it resumes processing:
public static async void PostDataToWebApi(MyDataClass tData)
{
HttpResponseMessage s = null;
try
{
s = await client.PostAsJsonAsync("/api/Station/Collector", tData);
s.EnsureSuccessStatusCode();
}
catch (Exception e)
{
Console.WriteLine("ERROR (ClientPost): " + e.ToString());
}
}
The full error is:
at System.Runtime.CompilerServices.TaskAwaiter.ThrowForNonSuccess(Task task)
at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
at System.Runtime.CompilerServices.TaskAwaiter`1.GetResult()
at IICE_DataCollector_Remote.Program.<PostDataToWebApi>d__7.MoveNext() in e:\Users\TestUser.TEST\Documents\Visual Studio 2012\Projects\Test_App-trunk\TestCollector\Program.cs:line 475
Any quick fixes for this? From what I can tell it runs out of something, threads, sockets, who knows :-)
Any pointers would be appreciated, I'd love to get this working, as you can imagine doing the POST synchronously is considerably slower than asynchronously.
Just to be sure it wasn't my machine, local anti-virus or network I have tried on a W2k8 R2 server, a Windows 7 virtual guest desktop (fresh build) and a Windows 8 machine as well, with the same result.
More Info : I have tested this with partial success from a LAN connection with a smaller data set (10,000 records), and a DefaultConnectionLimit of 100. But, in production with 500,000 records, when posting to a remote server across the Internet (still low latency 25ms-50ms) I have not had any success.
Thanks in advance for any help :-)
A:
Ok, I have it working now. The biggest thing was to fine-tune the settings on the client end, for the server. These settings were different depending on whether I was running a test locally or over the Internet.
My "PostDataToWebApi" method now looks like this:
public static async void PostDataToWebApi(MyDataClass tData)
{
await throttler.WaitAsync();
allTasks.Add(Task.Run(async () =>
{
try
{
var s = await client.PostAsJsonAsync("/api/Station/Collector", tData).ConfigureAwait(false);
}
catch (Exception e)
{
Console.WriteLine("ERROR (ClientPost): " + e.ToString());
}
finally
{
throttler.Release();
}
}));
}
I have the following declared at the top of my console application:
private static List<Task> allTasks = new List<Task>();
private static SemaphoreSlim throttler;
Before my loop starts I have the following, with the variables changed to make sure it all works:
ServicePointManager.DefaultConnectionLimit = _DefaultConnections;
ServicePointManager.MaxServicePointIdleTime = _MaxIdleTime;
ServicePointManager.Expect100Continue = false;
ServicePointManager.CheckCertificateRevocationList = false;
throttler = new SemaphoreSlim(initialCount: _MaxQueue);
As a guide, for an Internet based transaction the following works for me:
Default Connections : 24
Max Idle Time : 400
SemaphoreSlim Initial Count: 50
For my LAN test I could run both the default connections and the initial count values higher without a problem, which is to be expected I guess :-)
Finally, just outside my look I have the following to make sure I don't kill any tasks still running at the end of my execution run:
await Task.WhenAll(allTasks);
Hope this helps!
|
[
"stackoverflow",
"0060135921.txt"
] | Q:
Adding two Var numbers together?
I assigned three cells of numbers and then I wanted to sum them together,
var num1, num2, num3: number;
num1 = prompt("Enter num1:");
num2 = prompt("Enter num2:");
num3 = prompt("Enter num3:");
document.write((num1 + num2 + num3));
alert((num1 + num2 + num3));
and if i enter 10 , 20 , 30 the out put is : 102030 ,
and its need to be 60...
Thanks for the helpers.
A:
The prompt() function work with string Values,
If you want to return type number add a (+) after the (=),
Take a look at this code and understand better what I meant :
let testPrompt1;
let testPrompt2;
testPrompt1 = prompt("Enter test 1:"); // Enter number
testPrompt2 = + prompt("Enter test 2:"); // Enter number
document.write("Type of test 1" + typeof testPrompt1 ); // String !!
document.write("Type of test 2" + typeof testPrompt2 ); // Number !!
// Add (+) after the (=) !!!!
let num1, num2, num3;
num1 =+ prompt("Enter num1:"); // 10
num2 = + prompt("Enter num2:");// 20
num3 = + prompt("Enter num3:");// 30
document.write((num1 + num2 + num3)); // Output 60
alert((num1 + num2 + num3)); // Output 60
I also suggest adding a variable of the result and not printing everything in one line for a better understanding of the code.
let num1, num2, num3, res;
num1 = + prompt("Enter num1:");
num2 = + prompt("Enter num2:");
num3 = + prompt("Enter num3:");
res = num1 + num2 + num3;
document.write("Result : " + res); // Output 60
alert("Result : " + res); // Output 60
|
[
"stackoverflow",
"0018900718.txt"
] | Q:
Conditional objects in access report?
I have a table with 2 different sets of customers form 2 different company, marked by an A or B in the company column. I am creating a report for invoices, i need to change the company name and details depending on it being company a or company b. Is this even possible. If not does anyone know of a good solution, or another option.?
A:
Sounds like you need a table called tblCompany, with an Identifier column (A, B, etc...) and all other info like Name, Address, etc... Then you would make a query that joins your first table to tblCompany, on the Identifier column. Your report would be based off that query, where you could pull in the Name from tblCompany, and all the other info from the first table.
Unless there's something I'm not understanding?
|
[
"stackoverflow",
"0014074258.txt"
] | Q:
Android ItemizedOverlay scaling canvas
i am trying to scale pictures in a Android map itemizedOverlay,
i got it working to the point where i can see 10 pictures, i got zoomControle but nothing else really,
this is the MapItems class that extends ItemizedOverlay, optimizations is welcome
import java.util.ArrayList;
import android.content.Context;
import android.graphics.drawable.Drawable;
import android.util.Log;
import com.google.android.maps.ItemizedOverlay;
import com.google.android.maps.MapView;
import com.google.android.maps.OverlayItem;
public class MapItems extends ItemizedOverlay
{
private ArrayList<OverlayItem> mOverlays = new ArrayList<OverlayItem>();
Context mContext;
public MapItems(Drawable defaultMarker)
{
super(boundCenterBottom(defaultMarker));
}
@Override
public void draw(android.graphics.Canvas canvas,MapView mapView,boolean shadow)
{
/*
Log.d("MapAc", String.valueOf(mapView.getZoomLevel()));
if(mapView.getZoomLevel() > 20)
{
Log.d("MapAc", "scaling up");
canvas.scale(1.2f, 1.2f);
}
*/
super.draw(canvas,mapView,false);
}
public MapItems(Context context)
{
super(boundCenterBottom(context.getResources().getDrawable(R.drawable.app_icon_clean)));
mContext = context;
}
public void addOverlay(OverlayItem overlay)
{
mOverlays.add(overlay);
populate();
}
public void clearOverlay()
{
mOverlays.clear();
}
@Override
protected OverlayItem createItem(int i)
{
return mOverlays.get(i);
}
@Override
public int size()
{
return mOverlays.size();
}
@Override
protected boolean onTap(int index)
{
/* ToDo
OverlayItem item = mOverlays.get(index);
AlertDialog.Builder dialog = new AlertDialog.Builder(mContext);
dialog.setTitle(item.getTitle());
dialog.setMessage(item.getSnippet());
dialog.show();
*/
return true;
}
}
i have been trying to scale in the draw method, using canvas.Scale, however this seems to redraw the canvas in another location, together with the old canvas"in its original size",
i am not sure if i am approaching this problem from the right angle, or if it is simply a matter of clearing the screen, i have been using a few days to figure this out, so a method to scale my pictures correct when zooming is VERY appreciated,
A:
Use the new MAPS API V2
Blog article
Video
|
[
"stackoverflow",
"0032875523.txt"
] | Q:
Why won't opendir() open a path after converting the path type with c_str()?
I'm trying to open a directory, the name of which (path) is currently in a std::string read in originally from a .csv file (although I don't think that changes anything about the string itself). Calling opendir(path.c_str()) returns NULL. I tried the following code, doing the conversion outside of opendir():
DIR *dir;
bool first = True;
string level = "";
struct dirent *ent;
const char * c = path.c_str();
// A
if ((dir = opendir(c)) != NULL){
// do stuff
// should open the directory and go here
}else{
// always ends up here
}
While this failed with path="LeanDataBase", a directory in the project folder, substituting opendir("LeanDataBase") for opendir(c) does seem to open the directory. However, this function is recursive, so I can't hard code this value or it doesn't work and falls into an infinite loop.
I also tried printing the types, with the following two lines inserted right after "A" in the previous code:
cout << typeid(c).name() << endl;
cout << typeid("LeanDataBase").name() << endl;
Which yielded the following output:
PKc
A13_c
Does this mean that I'm passing the wrong type to opendir()? It seems like it can handle PKc, but not A13_c. Is there a way to convert the path string to the proper type?
A:
Looking at my crystall ball, I see the following issue: path is modified (or even leaves the scope) after path.c_str() is called, but before opendir() is called. It is usually a bad practice to remember result of c_str() in any variable, as it leads to issues like this. c_str() is intended for in-place usage, like following
opendir(path.c_str());
|
[
"stackoverflow",
"0052677493.txt"
] | Q:
Get matrix of handwritten digit in HTML canvas with javascript
I am trying to get the matrix of an object drawn on a canvas. I have a 500*500 canvas which needs to be converted to 28*28 to facilite compatibility with the MNIST dataset.
However, when I try to print the matrix in the console, I only get zeros:
[0,0,0,0,0,0,0,0,0,0,0,0,0.....]
Here's my code:
var c = document.getElementById('c');
var ctx = c.getContext('2d')
var paint = false
var hidden = false
window.onmousedown = toggle
window.onmousemove = draw
window.onmouseup = drawoff
function toggle() {
if (paint) {
paint = false;
} else {
paint = true;
}
}
function draw(e) {
var rect = c.getBoundingClientRect();
if (paint && !hidden) ctx.fillRect(e.x - rect.left, e.y - rect.top, 50, 50)
}
function drawoff() {
paint = false;
}
function clear() {
ctx.clearRect(0, 0, 500, 500);
}
window.save = function() {
var digit = new Image();
digit.src = c.toDataURL();
c.width = 28
c.height = 28
ctx.drawImage(digit, 4, 4, 20, 20);
document.getElementById('img').src = c.toDataURL();
// document.getElementById('c').style.display = 'none';
hidden = true
var imgData = ctx.getImageData(0, 0, 28, 28);
var imgBlack = []
for (var i = 0; i < imgData.data.length; i += 4) {
if (imgData.data[i + 3] === 255) imgBlack.push(1)
else imgBlack.push(0)
}
var dataStr = JSON.stringify(imgData)
console.log(dataStr)
}
canvas { border: solid 1px black }
<div class="row canvas-button-css">
<button class="btn btn-success" id="save" onclick="save()">Generate Image</button>
<button class="btn btn-warning" id="clear" onclick="clear()">Clear Canvas</button>
</div>
<div class="row canvas-row-css">
<img id="img" style="">
<canvas id='c' class="canvas-css" width='500' height='500'></canvas>
</div>
(Or here on Jsfiddle)
PS: In the save function, the imgData matrix is also zeros, so it is not a problem with the conversion to imgBlack. Any help would be appreciated on how to properly get the image data onto the imgData matrix.
A:
You can use a canvas as image. I'm using a buffer canvas (unattached to the DOM) c1 to draw the image from the c canvas:
let c1 = document.createElement("canvas");
let ctx1 = c1.getContext('2d')
c1.width = 28
c1.height = 28
ctx1.drawImage(c, 4, 4, 20, 20);// c is the first canvas
The c1 canvas is 28/28.
var c = document.getElementById('c');
var ctx = c.getContext('2d')
var paint = false
var hidden = false
window.onmousedown = toggle
window.onmousemove = draw
window.onmouseup = drawoff
function toggle() {
if (paint) {
paint = false;
} else {
paint = true;
}
}
function draw(e) {
var rect = c.getBoundingClientRect();
if (paint && !hidden) ctx.fillRect(e.x - rect.left, e.y - rect.top, 50, 50)
}
function drawoff() {
paint = false;
}
function clear() {
ctx.clearRect(0, 0, 500, 500);
}
window.save = function() {
let c1 = document.createElement("canvas");
let ctx1 = c1.getContext('2d')
c1.width = 28
c1.height = 28
ctx1.drawImage(c, 4, 4, 20, 20);
document.getElementById('img').src = c1.toDataURL();
// document.getElementById('c').style.display = 'none';
hidden = true
var imgData = ctx1.getImageData(0, 0, 28, 28);
var imgBlack = []
for (var i = 0; i < imgData.data.length; i += 4) {
if (imgData.data[i + 3] === 255) imgBlack.push(1)
else imgBlack.push(0)
}
var dataStr = JSON.stringify(imgData)
console.log(dataStr)
}
canvas { border: solid 1px black }
<div class="row canvas-button-css">
<button class="btn btn-success" id="save" onclick="save()">Generate Image</button>
<button class="btn btn-warning" id="clear" onclick="clear()">Clear Canvas</button>
</div>
<div class="row canvas-row-css">
<img id="img" style="">
<hr>
<canvas id='c' class="canvas-css" width='500' height='500'></canvas>
</div>
|
[
"stackoverflow",
"0023068875.txt"
] | Q:
parse json array in jquery in foreach loop
I'm trying to parse json array with objects and use them to create multiple checkboxes.
This is what I have:
JSON Data:
[{
"ID": 1,
"Name": "Bacon",
"Description": "",
"Price": 0
}, {
"ID": 2,
"Name": "Beef",
"Description": "",
"Price": 0
}, {
"ID": 3,
"Name": "Chicken",
"Description": "",
"Price": 0
}, {
"ID": 4,
"Name": "Ham",
"Description": "",
"Price": 0
}]
In the JS code I have this:
success: function (data) {
var objects = JSON.stringify(data);
for (var key in objects) {
var checkBox = "<input type='checkbox' data-price='" + key.Price + "' name='" + key.Name + "' value='" + key.ID + "'/>" + key.Name + "<br/>";
$(checkBox).appendTo('#modifiersDiv');
};
$('#addModifiers').modal('show');
}
But the key object doesn't contain any data. My question is how I can do foreach loop and get the data I need and fetch that data in the checkbox properties.
Thanks in advance, Laziale
A:
Your data should already be a javascript array because you've specified the JSON type for the jQuery Ajax call so it should have already parsed the JSON into javascript. As such, you can just directly iterate it as the array:
success: function (data) {
for (var i = 0; i < data.length; i++) {
var checkBox = "<input type='checkbox' data-price='" + data[i].Price + "' name='" + data[i].Name + "' value='" + data[i].ID + "'/>" + data[i].Name + "<br/>";
$(checkBox).appendTo('#modifiersDiv');
}
$('#addModifiers').modal('show');
}
Or, if you want to use jQuery's .each() iterator instead of a for loop, you can do this:
success: function (data) {
$.each(data, function(key, item) {
var checkBox = "<input type='checkbox' data-price='" + item.Price + "' name='" + item.Name + "' value='" + item.ID + "'/>" + item.Name + "<br/>";
$(checkBox).appendTo('#modifiersDiv');
});
$('#addModifiers').modal('show');
}
A:
You shouldn't be using var objects = JSON.stringify(data); since the data is already a JSON object.
Use JSON.stringify to create a string from a object
Use JSON.parse is to create an object from a string
Example:
var data = [{id: 1, name:'personsName'}, {id: 2, name:'personsName2'}]
var string = JSON.stringify(data)
var json = JSON.parse(string)
You can loop trough the data and append by using:
data.forEach(function(key, index){
$("#modifiersDiv")
.append($("<input></input>")
.attr("type", "checkbox")
.attr("data-price",key.Price )
.attr("name",key.Name )
.attr("value",key.ID)
.text(key.Name));
});
|
[
"askubuntu",
"0001090388.txt"
] | Q:
what's the shell command for dash screenshot application?
I know in ubuntu, I can use Screenshot application using which I can take a screen shot for selected window. This means there is the application already on ubuntu. What's the command line command for that application? (I know gnome-screenshot lets us take the whole screenshot, but I want to select a region when taking the shot.)
A:
In order to grab a screenshot of window, use this command
gnome-screenshot -w
In order to grab an area, use this command
gnome-screenshot -a
here is the link for source http://manpages.ubuntu.com/manpages/bionic/man1/gnome-screenshot.1.html
-c, --clipboard
Send the grab directly to the clipboard.
-w, --window
Grab the current active window instead of the entire screen.
-a, --area
Grab an area of the screen instead of the entire screen.
-b, --include-border
Include the window border within the screenshot.
-B, --remove-border
Remove the window border from the screenshot.
-p, --include-pointer
Include the pointer with the screenshot.
-d, --delay=SECONDS,
Take the screenshot after the specified delay [in seconds].
-e, --border-effect=EFFECT,
Add an effect to the outside of the screenshot border. EFFECT can be ``shadow''
(adding drop shadow), ``border'' (adding rectangular space around the screenshot)
or ``none'' (no effect). Default is ``none''.
-i, --interactive
Interactively set options in a dialog.
-f, --file=FILENAME
Save screenshot directly to this file.
--display=DISPLAY
X display to use.
-?, -h, --help
Show a summary of the available options.
|
[
"stackoverflow",
"0013471908.txt"
] | Q:
JQuery get value from one td to another
I have this code
<table class="X">
<tr>
<td class="Y">Value</td>
</tr>
</table>
<table class="Z">
<tr>
<td><input class="edit-Y" type="text"></td>
</tr>
</table>
I need to get value from td with class "Y" to input with class "edit-Y" with jQuery.
I tried to write the script in different ways, but every time I got either an empty field or [object Object]. Any ideas?
Thanks!
A:
Try this,
$('.edit-Y').val($('.Y').html());
Additional answer for the comment
If you have html like the following and you want to select each value in td and insert it the specific input boxes,
<table class="X">
<tr>
<td class="Y1">Value1</td>
<td class="Y2">Value2</td>
<td class="Y3">Value3</td>
</tr>
</table>
<table class="Z">
<tr>
<td><input class="edit-Y1" type="text">
<input class="edit-Y2" type="text">
<input class="edit-Y3" type="text"></td>
</tr>
</table>
jquery would do like this,
$(function(){
$('td[class^=Y]').each(function(){
var tdValue = $(this).html();
var id = $(this).attr('class');
$('.edit-'+id).val(tdValue);
});
});
|
[
"math.stackexchange",
"0001380234.txt"
] | Q:
Itō Integral multiplied by Riemann Integral
I was wondering whats the result of an Itō integral multiplied by a Riemann Integral. For example, what is $$\left(\int_0^T f(u)\ \mathsf dW_u\right)\left(\int_0^T g(v)\ \mathsf dv\right)$$ where $W$ is a standard Wiener process. Since $\mathsf dW_t\mathsf dt=0$, does that mean the multiplication of the two integrals is also zero? Is there a formal way of proving the result? Thanks in advance for any help.
A:
Why it is not $0$:
The expression $\text{d}W_t\, \text{d}t = 0$ is valid when it is being jointly integrated with respect to the same variable $t$, thus, symbolically $$\int_{t=0}^T f(t) \text{d}W_t\, \text{d}t = 0.$$ In your case you have two different integrals with respect to different integration variables $u$ and $v$, so you can't say that $\text{d}W_u\, \text{d}v$ is null. You can interpret it as a "double differential" in a similar fashion than $\text{d}x\, \text{d}y$ in double integrals.
What is it then?
It is a good exercise to prove that an integral of the form $\int_0^T f(s)dW_s$ is normally distributed with zero mean and variance $\int_0^T |f(s)|^2ds $.
(Hint: write the Riemann sums and interpret them as a sum of independent normally distributed random variables.)
Thus, the factor to the left is a random variable. On the other hand, assuming that $g$ is a real valued function (non-random), the factor to the right is a number, call it $\sigma$. The product of those two then is a normally distributed random variable of mean $0$ and variance $\sigma^2\int_0^T |f(s)|^2ds$.
|
[
"stackoverflow",
"0053059054.txt"
] | Q:
Android Google Map in Fragment always show the SupportMapFragment was null
I want build a android map application and Launch app will got null from SupportMapFragment(said my supportMapFragment is null), if you can point me where is my mistake, please help, many thanks.
code, XML and manifest as below.
By the way, the MapFragment is come from BottomNavigationView tab
R.id.navigation_map -> {
val mapFragment = MapFragment()
mapFragment.setArguments(intent.extras)
supportFragmentManager.beginTransaction()
.add(R.id.fragment_container, mapFragment).commit()
return@OnNavigationItemSelectedListener true
MapFragment.java
public class MapFragment extends Fragment implements OnMapReadyCallback {
private View rootView;
private AppCompatActivity context;
private SupportMapFragment supportMapFragment;
private GoogleMap map;
public MapFragment() {
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
// Inflate the layout for this fragment
rootView = inflater.inflate(R.layout.fragment_map, container, false);
context = (AppCompatActivity) getActivity();
supportMapFragment = (SupportMapFragment) getFragmentManager().findFragmentById(R.id.fragmentmap);
if (supportMapFragment != null) {
supportMapFragment.getMapAsync(this);
getFragmentManager().beginTransaction()
.replace(R.id.fragment_container, supportMapFragment).commit();
} else {
Toast.makeText(context, "Error - Map Fragment was null!!", Toast.LENGTH_SHORT).show();
}
return rootView;
}
@Override
public void onMapReady(GoogleMap googleMap) {
}
}
fragment_map.xml
<android.support.design.widget.CoordinatorLayout
xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:fitsSystemWindows="true">
<fragment
android:layout_width="match_parent"
android:layout_height="match_parent"
android:id="@+id/fragmentmap"
tools:context=".MainActivity"
android:name="com.google.android.gms.maps.SupportMapFragment" />
</android.support.design.widget.CoordinatorLayout>
Manifests
<uses-feature
android:glEsVersion="0x00020000"
android:required="true"/>
<application
android:allowBackup="true"
android:icon="@mipmap/ic_launcher"
android:label="@string/app_name"
android:roundIcon="@mipmap/ic_launcher_round"
android:supportsRtl="true"
android:theme="@style/AppTheme">
<uses-library android:name="org.apache.http.legacy" android:required="false"/>
<activity
android:name=".MainActivity"
android:label="@string/app_name">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
<meta-data
android:name="com.google.android.gms.version"
android:value="@integer/google_play_services_version" />
<meta-data
android:name="com.google.android.geo.API_KEY"
android:value="@string/google_maps_key" />
</application>
A:
If you are using fragment inside another fragment then better to use getChildFragmentManager as it return a private FragmentManager for placing and managing Fragments inside of this Fragment.
supportMapFragment = (SupportMapFragment) getChildFragmentManager().findFragmentById(R.id.fragmentmap);
Reference: getChildFragmentManager
|
[
"stackoverflow",
"0061656675.txt"
] | Q:
Unexpected array behaviour with use of sprintf in C
I am facing weird behaviour after apply sprintf: list[0] seems just gone away and result of strlen is 0. Then I try to apply strcpy, strlen meets expectation and returns 3. So my question is why sprintf will erase my list[0], how do I recover value of list[0] if I insist to apply sprintf? thanks in advance.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
void main() {
char list[5][7] = { "One", "Two", "Three", "Four", "Five" };
char item[7];
int i = 0;
for (i = 0; i < 5; i++) {
sprintf(item, "%-7s", list[i]);
//strcpy(item, list[i]);
}
printf("%d", strlen(list[0]));
}
A:
Your code has undefined behavior because %-7s requires at least 8 bytes in the destination array: 7 for the left aligned string plus one for the null terminator. Note that if any of the strings were longer than 7 bytes, even more space would be required in the destination array.
This mistake might explain the observed behavior: sprintf writes 8 bytes to item, ie one byte too many, and if the array list starts in memory just after the end of item, its first byte would be overwritten with a null bytes causing list[0] to to be truncated and become an empty string with a length of 0.
You should use snprintf that prevents buffer overflow and make your array at least 8 bytes long.
Note also that main has a return type of int.
Here is a modified version:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main() {
char list[5][7] = { "One", "Two", "Three", "Four", "Five" };
char item[8];
for (int i = 0; i < 5; i++) {
snprintf(item, sizeof item, "%-7s", list[i]);
}
printf("%d\n", strlen(list[0]));
return 0;
}
A:
As other said:
At this line: sprintf(item, "%-7s", list[i]); the item char array is supposed to be at least 8 bytes long to hold 7 characters + the end string byte \0.
Moreover sprintf is dangerous you should use snprintf instead.
|
[
"stackoverflow",
"0037197013.txt"
] | Q:
Cannot use readMavenPom in Jenkinsfile
I am working on a Jenkinsfile for use with Jenkins 2.0. The readMavenPom method is not recognized. Is there some configuration I am missing to make this available?
My Jenkinsfile:
node {
stage 'Checkout'
checkout scm
env.PATH = "${tool 'maven-3'}/bin:${env.PATH}"
stage 'Build'
def pom = readMavenPom file: 'pom.xml'
echo "${pom}"
sh "mvn -DskipTests=true verify"
}
When run, I get the following error:
java.lang.NoSuchMethodError: No such DSL method 'readMavenPom' found among
[AWSEBDeployment, archive, bat, build, catchError, checkout, deleteDir, dir, echo,
emailext, error, fileExists, git, input, isUnix, load, mail, node, parallel,
properties, pwd, readFile, retry, sh, slackSend, sleep, stage, stash, step, svn,
timeout, tool, unarchive, unstash, waitUntil, withCredentials, withEnv, wrap,
writeFile, ws]
A:
I needed to install the pipeline-utility-steps plugin.
|
[
"pt.stackoverflow",
"0000094371.txt"
] | Q:
Dúvida sobre each() no Jquery
Porque o código abaixo só me retorna o primeiro "104 104" e não "104 106"
$(function(){
$('#sortable li').each(function(){
console.log($('#sortable li').attr('id'));
});
});
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<ul id="sortable">
<li id="104"></li>
<li id="106"></li>
</ul>
A:
Quando usas $('#sortable li').attr('id') o jQuery vai retornar somente o id do primeiro elemento que esse seletor encontrar.
Quando usas essa linha de código dentro de um .each() não estás na verdade a mudar nada, e daí ele dar a mesma resposta nas duas vezes.
Contudo o seletor $('#sortable li') retorna todos os elementos que queres e faz sentido usar o .each() para os iterar. Sendo que o jQuery atribui o elemento iterado ao this dentro dessa função passada ao .each() então para saberes o id basta usares this.id, como referi no meu comentário antes.
Assim basta usares:
$('#sortable li').each(function(){
console.log(this.id);
});
NOTA: repara que te falta }); no final do teu código, antes do </script>.
|
[
"stackoverflow",
"0028675982.txt"
] | Q:
Ruby: loops and difference in rescuing
I'm studying Ruby and I'm quite frustrated. Here are three code examples which are meant to do the same thing:
Example 1
animals = %w(dog cat horse goat snake frog)
count = 0
begin
animals.each do |animal|
puts "The current animal is #{animal}"
break if count == 10
count += 1
raise if animal == 'horse'
end
rescue
retry
end
It works fine. I tried to do the same trick with for (and perhaps I got lost in the sequence ending):
Example 2
animals = %w(dog cat horse goat snake frog)
count = 0
for animal in animals
begin
puts "The current animal is #{animal}"
break if count == 10
count += 1
raise if animal == 'horse'
end
rescue
retry
end
It doesn't work (syntax error, unexpected keyword_rescue, expecting keyword_end).
Here I tried to use retry inside a for loop (well, it actually was my first try), but instead of retrying the whole loop it just retries current iteration, giving out a dog, a cat and a bunch of horses:
Example 3
animals = %w(dog cat horse goat snake frog)
count = 0
for animal in animals
begin
puts "The current animal is #{animal}"
break if count == 10
count += 1
raise if animal == 'horse'
rescue
retry
end
end
So what am I doing wrong? Is retrying inside a loop a wrong idea overall? Why do each and for loops work differently here? How do I make a correct retry from for loop?
Based on this thread: https://teamtreehouse.com/forum/getting-an-error-when-practicing-retry-command-in-forloops
A:
The better way to use a counter for your first example would be
begin
animals.each_with_index do |animal, count|
puts "The current animal is #{animal}"
break if count == 10
count += 1
raise if animal == 'horse'
end
rescue
retry # should be next
end
But that is a continous loop because you do a retry which keeps getting the horse back and so raising an error etc, you could use next instead but if you just want to show all the animals without the horse the following is more 'rubyesque'. First you select all the non 'horse' animals, then you limit the result to the first 10.
animals
.reject{|animal| animal == 'horse'}
.take(10)
.each{|animal| puts "The current animal is #{animal}"}
You example with for isn't rubiesque at all but here the adjusted code (indentation !)
animals = %w(dog cat horse goat snake frog)
count = 0
for animal in animals
begin
puts "The current animal is #{animal}"
break if count == 10
count += 1
raise if animal == 'horse'
rescue
retry # or next ?
end
end
|
[
"stackoverflow",
"0011466595.txt"
] | Q:
Listening to text/event-stream using C#
I am writing a C# class to wrap around a simple web service. The RESTful stuff was easy to wrap up, but now I need to raise an event when something on the server changes.
I've set the server up to make an event stream available, but I don't know how to pick up the stream in C#.
Currently I'm dong something like this:
public class ServiceWrapper
{
private readonly wc = new WebClient();
public ServiceWrapper()
{
wc.OpenReadAsync(new Uri(UriOfEvent));
wc.OpenReadCompleted += ServerEventOccurs;
}
private void ServerEventOccurs(object sender, OpenReadCompletedEventArgs args)
{
using (var sr = new StreamReader(args.Result))
{
var message = ParseStream(sr);
RaiseServerEventOccurred(message);
}
wc.OpenReadAsync(new Uri(UriOfEvent));
}
//usual code for declaring and raising ServerEventOccurred event
}
In my test the event gets picked up once, but not twice. The event on the server is essentially a switch - something happens and it goes on, something else happens and it goes off. I know the switching works, because I've hooked the event stream up to a normal web page to test it.
How should I be dealing with event streams in C#?
Edit 1: I've updated the code to fix the bug TimVK points out, but the event stream still isn't being picked up the second time it should be.
A:
Doesn't it work when you put your wc as a property in the class instead of creating always a new one in your methods?
public class ServiceWrapper
{
WebClient wc {get;set;}
public ServiceWrapper()
{
wc = new WebClient();
wc.OpenReadAsync(new Uri(UriOfEvent));
wc.OpenReadCompleted += ServerEventOccurs;
}
private void ServerEventOccurs(object sender, OpenReadCompletedEventArgs args)
{
using (var sr = new StreamReader(args.Result))
{
var message = ParseStream(sr);
RaiseServerEventOccurred(message);
}
wc = new WebClient();
wc.OpenReadAsync(new Uri(UriOfEvent));
}
//usual code for declaring and raising ServerEventOccurred event
}
Then I suppose the event should be raised everytime.
|
[
"stackoverflow",
"0052355455.txt"
] | Q:
How to show and hide my contents when I click my icon
I just created a card-form(bootstrap) and in the form,
there are a plus icon and hidden contents(id=mycontent_1 with display: none). what I'm trying to do is listed as follows.
I tried to do the first one on my java-script but it's not working.
when I click plus icon, my function(toggler) should be executed and the icon would be hidden and my contents(text boxes and delete button) have to be visible.
similarly in opposite direction, when I click the delete button, my
contents(text boxes and a delete button) have to be invisible and the
plus icon should be visible.
need your kind helps for my two functions.
here are my codes for jsfiddle.
https://jsfiddle.net/Sanchez/aq9Laaew/219304/
<div class="col-sm-6">
<div class="card">
<div class="card-header" id="cardHeader1" style="visibility: hidden;"> no name </div>
<div class="card-body">
<a href="#" class="btn btn-info btn-lg" onclick="toggler('myContent_1');">
<span class="glyphicon glyphicon-plus" id=icon1 onclick="toggler('myContent_1');"></span> Plus
</a>
<div id="myContent_1" class="card-title" style="display: none;" >
<form action="" method="post">
<div class="form-group">
<div class="input-group">
<div class="input-group-prepend">
<span class="input-group-text">Number</span>
</div>
<input type="text" id="notiSeq_1" name="notiSeq" class="form-control" value="">
<div class="input-group-append">
<span class="input-group-text">
<i class="fa fa-sort-numeric-asc"></i>
</span>
</div>
</div>
</div>
<div class="form-group">
<div class="input-group">
<div class="input-group-prepend">
<span class="input-group-text">Title</span>
</div>
<input type="text" id="title_1" name="title" class="form-control" value="">
<div class="input-group-append">
<span class="input-group-text">
<i class="fa fa-tumblr"></i>
</span>
</div>
</div>
</div>
<div class="form-group form-actions">
<button type="button" id="delBtn_1" class="btn btn-danger">Delete</button>
</div>
</form>
</div>
</div>
</div>
</div>
function toggler(divId){
var tempId = divId.slice(-1);
var x = document.getElementById("icon" + tempId);
var y = document.getElementById("cardHeader" + tempId);
x.style.display = "none";
y.style.visibility = "visible";
$("#delBtn_" + tempId).show();
$("#" + divId).toggle();
}
A:
To begin with you should place you js code on the head before the body.
Afterwards, replace the a tag with button
Finally call toggler function on delete button's onclick
<script>
function hidePlusBtn() {
$("#plusBtn").hide();
}
function toggler(divId) {
var tempId = divId.slice(-1);
var x = document.getElementById("icon" + tempId);
var y = document.getElementById("cardHeader" + tempId);
x.style.display = "none";
y.style.visibility = "visible";
$("#delBtn_" + tempId).show();
$("#" + divId).toggle();
}
</script>
<div class="col-sm-6">
...
<button
id="plusBtn"
class="btn btn-info btn-lg"
onclick="toggler('myContent_1');">
<span
class="glyphicon glyphicon-plus"
id=icon1
onclick="toggler('myContent_1');">
</span> Plus
</button>
...
<button
type="button"
id="delBtn_1"
class="btn btn-danger"
onclick="toggler('myContent_1'); hidePlusBtn()">Delete
</button>
...
</div>
Updated Demo: https://jsfiddle.net/1s390orm/
|
[
"stackoverflow",
"0047833418.txt"
] | Q:
angular 4 @Input paramter changed inside a subscribe does not update the view
I have got a select component and I parameter it like this:
<app-select-v2
[formGroup]="model"
[controlName]="'owner'"
[options]="ownerOptions"
[label]="'Select task\'s owner'"
[width]="'100%'"
[height]="'30rem'"
[scrollFrom]="5"
></app-select-v2>
This is a custom select-option list what gets the ownerOptions array. The array looks like this:
ownerOptions:Option[] = [
{name: 'name1', value: 'value1'},
{name: 'name2', value: 'value2'}
...
];
At the start this is an empty array and I want to fill it with data. The setOwnerOptions() function what is suppose to do the fill. I call it in ngOnInit()
setOwnerOptions(){
this._calendar.getActiveUsers().subscribe((names:string[]) => {
for(var i:number = 0; i<names.length; i++){
var _option = new Option();
this._calendar.getUserFullName(names[i]).subscribe((fullname:string) =>{
_option.name = fullname;
_option.value = names[i];
this.ownerOptions.push(_option);
});
}
});
}
(I know this is not the best way to get the id(name) and full_names from the api. I will rewrite this part soon.) The getActiveUsers() and the getUserFullName(name:string) functions are working fine. I get the requested data from the api and the loop is pushing it to the ownerOptions array. But the point is the view does not update and outside of the subscribe the ownerOptions is still an empty array because angular doesn't notice the change.
Could someone help me with this please? I have sort of experience with this :/
(I tried to create a zone and run the script with changeRef but It worked the same for me.)
Thanks your answers and time :)
A:
The problem is here:
_option.value = names[i];
At this point i equals names.length - 1 for all subscriptions. It's indeed not the best piece of code, but it's hard to see what you actually want to achieve, so to fix your issue you can change it to this. By using a forEach loop you prevent this issue:
setOwnerOptions(){
this._calendar.getActiveUsers().subscribe(names => {
names.forEach(name =>
this._calendar.getUserFullName(name).subscribe(fullname => {
let option = new Option();
option.name = fullname;
option.value = name;
this.ownerOptions.push(_option);
})
})
}
Not entirely sure, but I believe you can also just fix it by using the let keyword in the for loop, instead of var. Actually, you should -never- use the var keyword, unless you really know what you are doing, and even then you know not to use it
|
[
"stackoverflow",
"0033870908.txt"
] | Q:
Lowest API level used for Android development as of November-2015
I am developing new application. I just want to know that what would be the lowest API version that should be used for my application ? Currently I am using minimum API level-14 (Ice Cream Sandwich) and targeting API is Level-23 (Marshmallow).
I have followed http://developer.android.com/about/dashboards/index.html#OpenGL for the same. Any other things that should be taken care to choose minimum API level.
A:
so as of today only 4% of devices are using API level lower than 15
So keeping minimum SDK as 15 you will be able to cover a decent amount of users with less hassle for managing compatibility on lower SDK versions.
this page will give you a head start for other factors to consider while targeting your apps.
|
[
"stackoverflow",
"0019768860.txt"
] | Q:
Responsive youtube video
On many places I found this code to make a video responsible, but it doesn't work for me.
<div id='wrapp'>
<iframe id='player' src="//www.youtube.com/embed/VWSL2SykovA?rel=0"></iframe>
</div>
css
#wrapp {
position: relative;
padding-bottom:75%; // video is 4:3 aspect ratio
padding-top: 25px;
height: 0;
width:70%;
margin:15px auto;
z-index:2;
border:medium ridge #b30000;
border-radius:9px;
}
#player{
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
}
video, i.e. iframe is too tall.
Here is the FIDDLE
A:
Your proportions seems to be faultives.
Try that css settings proposed by Zurb Foundation
#wrapp {
height: 0;
margin-bottom: 16px;
margin-left: 30px;
overflow: hidden;
padding-bottom: 67.5%;
padding-top: 25px;
position: relative;
}
#player {
height: 100%;
left: 0;
position: absolute;
top: 0;
width: 100%;
}
jsFiddled here
Here's a screen capture of nhZBV/4
|
[
"cooking.stackexchange",
"0000093302.txt"
] | Q:
Tomato substitute in italian dishes?
I'm looking for some delicious Italian dishes,but have to avoid tomatoes. What can be used as an alternate to tomatoes?
Using Google, I found Red Pepper Pasta Sauce, and Cream Sauce.
I have also heard that yogurt is a good substitute for tomato. Can yogurt be a replacement in Italian dishes?
A:
Firstly, Italy is a country whose cuisine is defined regionally. It varies quite widely. While tomatoes are common throughout Italy, there are plenty of Italian dishes that do not use tomato. Any advice about substitutions would be better provided if you have a specific dish in mind. I can't see yogurt substituting for tomato. While yogurt might provide the acidity that a tomato has, it also as a lactic component that would probably alter most dishes that called for tomato. It could be delicious, but it would be different from the original intention.
|
[
"math.stackexchange",
"0000221638.txt"
] | Q:
When is the cokernel of a homomorphism of flat sheaves flat?
Let $X\to S$ be a scheme and let $D'$ and $D$ be relative effective Cartier divisors on $X$ satisfying $D' \subset D$ and let $D''$ satisfy $D = D' + D''$. Let
$$0 \to \mathscr{O}_X(D'') \to \mathscr{O}_X(D) \to \mathscr{L} \to 0$$
be the induced exact sequence of sheaves on $X$. Of course $\mathscr{O}_X(D')$, $\mathscr{O}_X(D'')$ and $\mathscr{O}_X(D)$ are flat, but
Question: Is $\mathscr{L}$ necessarily flat? If not, when does flatness fail.
A colleague of a colleague has said that a positive answer (i.e. that $\mathscr{L}$ is indeed flat) is "at the beginning of Katz-Mazur", and I presume he is referring to Section 1.3, but I don't see how to derive the result from the material there. Section 1.3 is where the existence of the relative effective Cartier divisor $D''$ such that $D = D' + D''$ is stated, from which we can derive the exact sequence
$$0 \to \mathscr{O}_X(D'') \to \mathscr{O}_X(D) \to \mathscr{O}_X(D) \otimes \iota_*\mathscr{O}_{D'} \to 0$$
by tensoring
$$0 \to \mathscr{I}_{D'} \to \mathscr{O}_X \to \iota_*\mathscr{O}_{D'} \to 0$$
with $\mathscr{O}_X(D)$. So it would suffice to show that $\mathscr{O}_X(D) \otimes \iota_*\mathscr{O}_{D'}$ is flat, for which it would suffice to show that $\iota_*\mathscr{O}_{D'}$ is flat. Any ideas?
A:
This is in the definition of a relative effective Cartier divisor: $O_{D'}$ is flat over $S$.
|
[
"mathematica.stackexchange",
"0000212934.txt"
] | Q:
Obtain PlotLabel from Plot
I have a very simple question that I cant find answer: how do I extract the PlotLabel from an existing plot? For example
plotwithLabel = Plot[x, {x, 1, 2}, PlotLabel -> "This is Label"]
Any way to get the text "This is Label"?
A:
Plot is returning a Graphics object, with the label specified in its Options. Retrieve the options with
In[2]:= Options[plotwithLabel, PlotLabel]
Out[2]= {PlotLabel -> "This is Label"}
A:
PlotLabel /. plotwithLabel[[2]]
"This is Label"
A:
plotwithLabel = Plot[x, {x, 1, 2}, PlotLabel -> "This is Label"];
PlotLabel /. Cases[plotwithLabel, _Rule, All]
"This is Label"
If you look at the output of e.g. SequenceForm@ InputForm@ plotwithLabel, you will see the internal representation of the plot as a Graphics object. You will note that it contains many options expressed as Rules (i.e. OptionName -> optionValue, like maybe PlotRange -> All). Such expressions are actually represented as Rule[PlotRange, All] in their native form, so they have head Rule.
My Cases expression extracts all possible Rule[something, something] expressions at All levels inside the plot's internal representation. It then uses them as replacement rules in a ReplaceAll expression (/.) to fish out the value of the one you are interested in, i.e. PlotLabel.
|
[
"stackoverflow",
"0005602380.txt"
] | Q:
Is * in a mysql query slower than picking specific columns?
I'm trying to figure out if:
SELECT * FROM tbl_mine
is slower than:
SELECT this_column,that_column FROM tbl_mine
in my MySQL queries. Can anyone speak to this based on experience?
A:
It's slower because the database has to send more data back to the client, but unless the data you're selecting is substantial this delay (on a simple query like you've provided) should be negligible. In other words, if you have a table with three fields, SELECT field1, field2, field3 FROM table is the same as SELECT * FROM table.
A:
Ignoring any specific index-driven optimisations (which may very well make a huge difference, but that's a wider discussion), the query itself won't take any non-trivial extra time.
You may experience slight slowdowns with the increased amount of data being sent through buffers and whatnot. You may certainly experience even significant slowdowns if you're then sending this data (especially if it's a lot) over a network connection or between applications.
|
[
"stackoverflow",
"0022244074.txt"
] | Q:
Iterate over a dictionary by comprehension and get a dictionary
How to iterate over a dictionary by dictionary comprehension to process it.
>>> mime_types={
'.xbm': 'image/x-xbitmap',
'.dwg': 'image/vnd.dwg',
'.fst': 'image/vnd.fst',
'.tif': 'image/tiff',
'.gif': 'image/gif',
'.ras': 'image/x-cmu-raster',
'.pic': 'image/x-pict',
'.fh': 'image/x-freehand',
'.djvu':'image/vnd.djvu',
'.ppm': 'image/x-portable-pixmap',
'.fh4': 'image/x-freehand',
'.cgm': 'image/cgm',
'.xwd': 'image/x-xwindowdump',
'.g3': 'image/g3fax',
'.png': 'image/png',
'.npx': 'image/vnd.net-fpx',
'.rlc': 'image/vnd.fujixerox.edmics-rlc',
'.svgz':'image/svg+xml',
'.mmr': 'image/vnd.fujixerox.edmics-mmr',
'.psd': 'image/vnd.adobe.photoshop',
'.oti': 'application/vnd.oasis.opendocument.image-template',
'.tiff':'image/tiff',
'.wbmp':'image/vnd.wap.wbmp'
}
>>> {(key,val) for key, val in mime_types.items() if "image/tiff" == val}
This is returning result like this:
set([('.tiff', 'image/tiff'), ('.tif', 'image/tiff')])
But I'm expecting
('.tif', 'image/tiff')
How can I modify that result to get a dictionary like :
{'.tif': 'image/tiff'}
A:
Replace
{(key,val) for key, val in mime_types.items() if "image/tiff" == val}
with
{key: val for key, val in mime_types.items() if "image/tiff" == val}
A:
The expression:
{ value for bar in iterable }
is a set comprehension.
In order to do a dict comprehension, you have to provide Python with a set of key-value pairs separated by ::
{ key: value for bar in iterable }
A:
You can do dictionary comprehension as @Anubhav Chattoraj suggested.
Or pass a generator expr as an argument to function dict:
In [165]: dict((k, mimes[k]) for k in mimes if mimes[k] == "image/tiff")
Out[165]: {'.tif': 'image/tiff', '.tiff': 'image/tiff'}
Don't mix the two ways up..
|
[
"stackoverflow",
"0042206512.txt"
] | Q:
To allow GET requests, set JsonRequestBehavior to AllowGet
I have bound bulk records in a Kendo UI grid. The response is returned from Json.
I am getting Error while using below format:
Problem Code : Method 1:
public JsonResult KendoserverSideDemo(int pageSize, int skip=10)
{
using (var s = new KendoEntities())
{
var total = s.Students.Count();
if (total != null)
{
var data = s.Students.OrderBy(x=>x.StudentID).Skip(skip)
.Take(pageSize).ToList();
return Json(new { total = total,
data = data,
JsonRequestBehavior.AllowGet });
}
else
{
return null;
}
}
}
Method 2 : Working fine using this:
public JsonResult KendoserverSideDemo(int pageSize, int skip=10)
{
using (var s = new KendoEntities())
{
var total = s.Students.Count();
if (total != null)
{
var data = s.Students.OrderBy(x=>x.StudentID).Skip(skip)
.Take(pageSize).ToList();
return Json(data, JsonRequestBehavior.AllowGet);
}
else
{
return null;
}
}
}
What is the problem in first Method 1?
A:
You have simple typo/syntax error
return Json(new { total = total, data = data,JsonRequestBehavior.AllowGet });
The JsonRequestBehavior.AllowGet is the second parameter of Json - it shouldnt be part of the object
return Json(new { total = total, data = data }, JsonRequestBehavior.AllowGet);
|
[
"boardgames.stackexchange",
"0000027956.txt"
] | Q:
Does a -1/-1 effect from a static ability cause a creature to die before it can do anything, upon entering the battlefield?
My friend has played Engineered Plague, naming Elf. Is the Elvish Hexhunter of any use here? Imagine it has haste via Concordant Crossroads.
A:
Your elf can't do anything. It becomes a 0/0 just after it enters the battlefield, and pretty much immediately goes to the graveyard. There's no opportunity to activate the ability before it dies. This has to do with state-based actions, and the fact that they're checked before you get priority:
704.3. Whenever a player would get priority (see rule 116, “Timing and Priority”), the game checks for any of the listed conditions for
state-based actions, then performs all applicable state-based actions
simultaneously as a single event.
704.5f If a creature has toughness 0 or less, it’s put into its owner’s graveyard. Regeneration can’t replace this event.
Triggered abilities, such as that of Elvish Visionary, will still trigger. You will place the ability onto the stack as soon as you get priority, which in this case is after the creature is already dead.
|
[
"stackoverflow",
"0001130122.txt"
] | Q:
1x10^49 in decimal - how binary bits is that and how would you convert it to binary?
I've encountered a website that uses a 50-digit decimal integer ID in a URL query string, which seems a little excessive.
The smallest 50-digit decimal number is 1.0 x 10^49, otherwise known as:
1000000000
0000000000
0000000000
0000000000
0000000000
How many bits would the binary representation contain?
How would you approach converting such a large decimal number to binary, taking into consideration the range limit of unsigned 32-bit-integer or 64-bit integers?
I ask out of pure programmer curiosity only - this is not a college question, work problem or interview puzzle!
A:
The minimal binary representation(with integer precision) can be found by taking the log (base 2) of the number. In this case the minimal amount of binary bits would be log(10^49) = 162.77. We need a whole number so we will just call it 163 bits.
If I had to represent that number, and the precision in a floating point representation was insufficient, I would just use some BigInteger library.
A:
49 * log(10) / log(2) = 162.774477, so the binary representation would contain 163 bits.
Use a bigint class and apply the standard algorithm for converting from decimal to binary.
A:
Since every decimal digit conveys the same information as lb 10 bits, any 50 digit number will fit into ceil(lb(10)*50) = 167 bits.
Specifically, it's not that hard to convert from decimal to binary, even by hand. Just divide by two, and put the modulus(1 if the last digit was odd, 0 if even) at the end of your binary result. If you need such high numbers in a program, just use your platform's big integer implementation, e.g. BigInteger in Java and just int in python. In the absence of that, look for a numerical library.
Oh, and 10^49 in binary is 163 bit long:
110
1101 0111 1001 1111 1000 0010 0011 0010
1000 1110 1010 0011 1101 1010 0110 0001
1110 0000 0110 0110 1110 1011 1011 0010
1111 1000 1000 1010 0000 0000 0000 0000
0000 0000 0000 0000 0000 0000 0000 0000
|
[
"stackoverflow",
"0055281439.txt"
] | Q:
TFS2018: Use npm publish task to release a built npm package
I have a build that creates an npm package (tgz). I would like to add a release pipline to publish this package to our npm server.
The package is available from the artifacts and I can download it in the release pipline. But to publish it with the NPM Task I need its exact name, which has the version number in it.
npm publish mypackage-1.0.0.tgz
I cannot see any way to get this version number or the full filename of the artifact in the release pipline.
I could probably use a powershell script to do this, but then I am missing the NPM credentials that I configured in TFS.
Am I supposed to do this differently? Am I missing something?
A:
use the powershell to find the file and set the filename as a new variable for the current release-process, then you could use the npm-publish task with the configured credentials
|
[
"stackoverflow",
"0036241785.txt"
] | Q:
How do I autoshrink a dynamic h1 element for different text length?
I have an h1 element that changes its HTML dynamically and I want it to auto shrink itself for longer text but return to its fixed font size for shorter text.
Here is the markup:
<div class="frame" dir="ltr">
<div class="frame-view">
<h1> //dynamic text </h1>
</div>
</div>
and the styling:
.frame {
z-index: 2;
position: relative;
top: 100px;
}
.frame-view {
text-align: center;
}
.frame-view h1 {
font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
font-style: normal;
font-variant: normal;
font-weight: 200;
font-size: 40px;
color: white;
padding: 0 20px;
}
Here's what the h1 looks like when a long text is in it, on a mobile screen:
rather than staying within the 20px left, right padding and just auto shrinking the text, it stretches past the HTML.
in landscape it looks fine of course because there is enough room to contain it:
So how do I make this h1 auto shrink for longer text and stay within the boundaries ?
UPDATE
to further understand what I am trying to achieve, think of it like this:
I want the container of the h1 to be static and the text adjust its size when it gets closer to the container edges. Imagine a Label being 50 px, now if you put the word "hello" in that label at 20px font size, it would be 20px font size with no problem.. but if you put the words "Hello how are you" it wouldnt fit within the boundaries at 20px font size so it would have to auto shrink itself to stay within the boundaries of the container.
A:
you should start with a given text width in pixels, then measure the width of the given text using the measuretext function. If width is approaching the width of your container, then shrink the font size and remeasure. You should obviously have a limit on how small the text can go before it becomes illegible. Once text gets too small, you have to truncate and add dots onto the end.
Here is a sample that shows roughly how to do it. You need javascript, and I have included jquery to make it easier. As you add more text, and the text starts getting near the edge of the box, the fontsize will shrink, down to a minimum of 10 pixels. After that text that is too long will be truncated.
You could also make the text grow again if it got shorter using similar functions but in reverse.. growing the font size till it just overflowed, then back one step. However I have not included code for that.
<!DOCTYPE html>
<html>
<head>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.12.0/jquery.min.js"></script>
</head>
<body>
<br>Enter your text:
<p></p>
<input type=text id=input_id style='width: 300px'></input>
<p></p>
<canvas id="myCanvas" width="300" height="150" style="border:1px solid #d3d3d3;">
Your browser does not support the HTML5 canvas tag.</canvas>
<script>
var txt = "Hello World more"
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");
var fontsize = 30
ctx.font = fontsize.toString() + "px Arial";
// ctx.fillText("width:" + ctx.measureText(txt).width, 10, 50);
// ctx.fillText(txt, 10, 100);
$('#input_id').val(txt)
ObserveInputValue('')
ObserveInputValue(txt)
// a bit crude but simple and it works: watch the input object every tenth sec
setInterval(function() {
ObserveInputValue($('#input_id').val());
}, 100);
function ObserveInputValue(iv) {
if (iv != txt) {
console.log('input value changed to: ' + iv)
txt = iv
ctx.clearRect(0, 0, 300, 150)
textwid = ctx.measureText(txt).width.toFixed(0)
// adjust font size so that text just fits
var maxfont = 50
var minfont = 10
while (textwid > 290 && fontsize > minfont) {
sizedecrease = Math.max(1, fontsize * 0.1).toFixed(0) // decrease by 10% or 1 pixel whichever is greater
fontsize -= sizedecrease
ctx.font = fontsize.toString() + "px Arial";
textwid = ctx.measureText(txt).width.toFixed(0)
}
// if text at min and still too long truncate text
while (textwid > 290 && fontsize <= minfont) {
// chop off last characters of text
txt = txt.substring(0, txt.length - 1)
textwid = ctx.measureText(txt).width.toFixed(0)
$('#input_id').val(txt)
}
// exercise to the reader.. increase font size again if user makes text shorter
ctx.fillText("width:" + textwid, 10, 50);
ctx.fillText("font:" + fontsize, 10, 100);
ctx.fillText(txt, 10, 150);
}
}
</script>
</body>
</html>
|
[
"stackoverflow",
"0016770005.txt"
] | Q:
Oracle Update and Return a Value
I am having a Update Statement on a large volume table.
It updates only one row at a time.
Update MyTable
Set Col1 = Value
where primary key filters
With this update statement gets executed I also want a value in return to avoid a Select Query on a same table to save resources.
What will be my syntax to achieve this?
A:
You can use the RETURNING keyword.
Update MyTable
Set Col1 = Value
where primary key filters
returning column1,column2...
into variable1,variable2...
|
[
"stackoverflow",
"0007742090.txt"
] | Q:
How to sort div's by content date
I'm trying to sort div's by content date...
What i've got is sorting. But not by date...
HTML
<div id="all_elements">
<!-- one element -->
<div class="element">
<div class="display-number">02</div>
<div class="year">20-10-2011</div>
</div><!-- element -->
<!-- one element -->
<div class="element">
<div class="display-number">03</div>
<div class="year">22-09-2011</div>
</div><!-- element -->
<!-- one element -->
<div class="element">
<div class="display-number">01</div>
<div class="year">01-12-2011</div>
</div><!-- element -->
<!-- one element -->
<div class="element">
<div class="display-number">04</div>
<div class="year">01-06-2011</div>
</div><!-- element -->
<!-- one element -->
<div class="element">
<div class="display-number">05</div>
<div class="year">01-06-2010</div>
</div><!-- element -->
</div> <!--all_elements-->
JQUERY
<script>
function sortDescending(a, b) {
return $(a).find(".year").text() < $(b).find(".year").text() ? 1 : -1;
};
$(document).ready(function() {
$('#all_elements .element').sort(sortDescending).appendTo('#all_elements');
});
</script>
I know i need a function to figure out the content is a date..
Just don't know how...
Who can help me out?
A:
You could do:
function sortDescending(a, b) {
var date1 = $(a).find(".year").text();
date1 = date1.split('-');
date1 = new Date(date1[2], date1[1] -1, date1[0]);
var date2 = $(b).find(".year").text();
date2= date2.split('-');
date2= new Date(date2[2], date2[1] -1, date2[0]);
return date1 < date2 ? 1 : -1;
};
$(document).ready(function() {
$('#all_elements .element').sort(sortDescending).appendTo('#all_elements');
});
EDIT - corrected the typo according to the comment! Fiddle here http://jsfiddle.net/TsZeg/
|
[
"rpg.stackexchange",
"0000109339.txt"
] | Q:
Does Shield of Faith stack?
Could shield of faith cast by multiple casters stack on one creature?
A:
No
Spells don't stack with themselves.
Combining Magical Effects:
The effects of different spells add together while the durations of those spells overlap. The effects of the same spell cast multiple times don't combine, however. Instead, the most potent effect — such as the highest bonus — from those castings applies while their durations overlap. (PHB 205)
A:
Short answer:
No.
Long Answer:
As long as an effect is active on the player, you can't stack it, casting it again would just extend the duration.
|
[
"sharepoint.stackexchange",
"0000207614.txt"
] | Q:
A different version of this App is already installed with the same version number
This is the error message I get when I try to upload an SPFx webpart. There is nothing wrong with the webpart itself, because I can upload it to a newly created site, but I can't replace the one on the existing site.
how can I resolve this?
A:
In this kind of situation I usually go to recycle bin and delete everything related to the app and app itself. Then go to the second stage recycle bin and do the same.
That might help, so give it a try. In most situations you don't need to increase the version number.
|
[
"stackoverflow",
"0012292852.txt"
] | Q:
MVVM best practice - work directly on the reference from list members of the model or make a copy?
Assume I have a model which offers me a property, which is list of custom objects. The model is a singleton and I give the reference of this property to the ViewModel. These custom objects have for example boolean properties. Now the user changes these values by clicking in the view and operating on the references in the model. Thus the list in my model does not change and therefore the model can not notify other views unless I implement event handling for every list member.
On the other hand side my model notifies others if I assign a complete new list.
The question is: is it in general good idea to work on the references of my model or should every ViewModel make a deep copy of the list content and assign later a complete new list to the model. And if not should I add event handling for every member of the list in my model?
Btw. the ObservableCollection is no option because it does not detect item changes and I think a general model should use the Ilist interface.
A:
There is no straightforward answer to your question; it is all a matter of how you want your application to behave.
If you want any changes made by the user to be immediately effective in the application, then you should work directly on the references returned by your model. But at the same time, you have to handle the cases where the application has the initiative to make changes to the list and notify the user.
On the other hand, if you prefer that any modification made by the user has to be validated by the push of a "Apply changes" button, the option of cloning the list is probably the best choice.
|
[
"stackoverflow",
"0046961670.txt"
] | Q:
How to interact with DOM on a UWP WebView?
I have a UWP XAML Page and a WebView
<WebView Name="webview1" Margin="10"
Source="http://www.apelosurgentes.com.br/en-us/"
LoadCompleted="WebView_LoadCompleted" />
How to read and manipulate the DOM of the document that was loaded on this WebView?
A:
How to interact with DOM on a UWP WebView?
You can use InvokeScriptAsync with the JavaScript eval function to use the HTML event handlers, and to use window.external.notify from the HTML event handler to notify the application using WebView.ScriptNotify.
private async void Button_Click(object sender, RoutedEventArgs e)
{
string functionString = String.Format("document.getElementById('nameDiv').innerText = 'Hello, {0}';", nameTextBox.Text);
await webView1.InvokeScriptAsync("eval", new string[] { functionString });
}
Scripts in the web view content can use window.external.notify with a string parameter to send information back to your app. To receive these messages, handle the ScriptNotify event.
public MyPage()
{
this.InitializeComponent();
MyWebView.ScriptNotify += MyWebView_ScriptNotify;
// Here we have to set the AllowedScriptNotifyUri property because we are
// navigating to some site where we don't own the content and we want to
// allow window.external.notify() to pass data back to the app.
List<Uri> allowedUris = new List<Uri>();
allowedUris.Add(new Uri("http://www.bing.com"));
MyWebView.AllowedScriptNotifyUris = allowedUris;
}
void MyWebView_ScriptNotify(object sender, NotifyEventArgs e)
{
// Respond to the script notification.
}
For more, Please refer to UWP WebView.
|
[
"askubuntu",
"0001086930.txt"
] | Q:
Are these miscellaneous problems/oddities during liveUSB normal? (lubuntu 18.10 i386)
First I noticed that the USB I'd booted from was listed twice:
To investigate, I opened the folder viewer and clicked to see mounted drives, and encountered a second error:
Then, checking out the new qlipper software it seems not to be displaying properly, and clicking on this displayed item does nothing:
The md5sum matched the stated hash, and I checked disk for errors before starting lubuntu and no errors were found. So is this all normal for various reasons, or should I not try to install from this liveUSB?
A:
First I noticed that the USB I'd booted from was listed twice:
This is normal for the ISO, and can safely be ignored.
Then, checking out the new qlipper software it seems not to be displaying properly, and clicking on this displayed item does nothing:
That's normal too; Qlipper is super minimal.
Proceed as normal :)
|
[
"codegolf.stackexchange",
"0000105858.txt"
] | Q:
12 hour to 24 hour time converter
Amazingly, this simple task doesn't seem to exist already, so...
Your task is to write a program that takes as input a 12 hour time, and converts it into "military time", or 24-hour time format.
Input will be in the form:
HH:MM am/pm
Although slight variations are allowed:
The space separating the am/pm from the rest of the time is optional.
The last part can either be "am"/"pm", or "a"/"p".
Any capitalization is fine.
Output will be the inputted time, transformed into 24-hour format. It can be a number, or a string.
For both the input and output:
0s in the first place are optional. 0s in the last 3 places are mandatory.
the delimiter separating hours and minutes can be a ":", " " (a space), or nothing.
Other notes:
Midnight can be expressed either as 0000, or 2400.
Midnight will be considered "am", while noon will be considered "pm".
Your program can be a function, or a full program, and should either return the result, or output it to the stdout. Trailing whitespace is ok.
Examples (you aren't required to support every format):
12:00am -> 0000
1200 pm -> 1200
1234Am -> 00:34
134a -> 134
01:34PM -> 13 34
1234pm -> 1234
This is code golf, so the smallest number of bytes wins. Since this is so trivial to solve using a built-in, it would be nice to see some code that manually solves this (but use of built-ins are fine).
Just to clarify, you aren't required to support every possible format. Supporting only a single input and a single output format (of your choice) is fine. I would however like to limit the format as outlined above (which is already quite free). {1134,'pm'}, for example, would be unacceptable.
A:
MATL, 4 bytes
15XO
Try it online!
Explanation
Built-in function: date string conversion with automatic detection of input format and with output format 15, which corresponds to 'HH:MM'. This is equivalent to @StewieGriffin's Octave answer.
A:
Octave, 21 17 bytes
Saved 4 bytes thanks to Luis Mendo. I could specify format number 15 instead of 'HHMM'.
@(c)datestr(c,15)
Explanation:
This is an anonymous function taking a string c as input on the format: '11:34 AM'. datestr recognizes the format automatically as one of the standard date formats, and outputs it in the specified format number 15, which is HH:MM.
Since the specified output format doesn't have AM or PM Octave automatically converts it to what you refer to as Military time.
Try it online.
A version not using datestr using 35 bytes
@(c)[c(1:4)+[1,2,0,0]*(c(5)>97),'']
Explanation:
Takes an input string c on the format 1134am.
@(c) % Anonymous function
[c(1:4) % First 4 elements of the string
+[1,2,0,0] % Add 1 and 2 to the first to the ASCII-value of the
first two characters
*)c(5)>97) % if the fifth element is larger than 97
(the ASCII code for `a`).
,''] % Implicitly convert to string
Or, a different approach for 37 bytes:
@(c)[c(1:4)+('1200'-48)*(c(5)>97),'']
A:
V, 21 17 bytes
Thanks @DJMcMayhem for 4 bytes!
í12:/0:
çp/12
$x
Try it online!
This takes the format HH:MMx where x is either a or p, and returns it in the format HH:MM
Hexdump:
00000000: ed31 323a 2f30 3a0a e770 2f31 3201 2478 .12:/0:..p/12.$x
00000010: 0ae7 612f 2478 ..a/$x
Explanation:
í12:/0: | find and replace 12: with 0: (to avoid replacing HH:12)
ç | on every line
p/ | that contains a p
12^A | increment 12 times (the first number, hours)
$x | delete the character at the end of the line
|
[
"stackoverflow",
"0030620287.txt"
] | Q:
NullPointerException after clicking login button in android
I am creating a login page and validating the username and password using PHP. When I click the login button after entering the username and password, I get "app has stopped unexpectedly" and a NullPointerException. I have attached my code; can anyone please tell me what mistake I have made?
Thank you.
mainactivity.java
package com.example.validation;
import java.io.InputStream;
import java.util.ArrayList;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.message.BasicNameValuePair;
import android.app.Activity;
import android.content.Intent;
import android.content.SharedPreferences;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.Button;
import android.widget.CheckBox;
import android.widget.EditText;
import android.widget.TextView;
import android.widget.Toast;
public class MainActivity extends Activity {
Button login;
String name="",pass="";
byte[] data;
HttpPost httppost;
StringBuffer buffer;
HttpResponse response;
HttpClient httpclient;
InputStream inputStream;
SharedPreferences app_preferences ;
CheckBox check;
private EditText username=null;
private EditText password=null;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
app_preferences = PreferenceManager.getDefaultSharedPreferences(this);
username = (EditText) findViewById(R.id.editText1);
password = (EditText) findViewById(R.id.editText2);
login = (Button) findViewById(R.id.button);
check = (CheckBox) findViewById(R.id.check);
String Str_user = app_preferences.getString("username","0" );
String Str_pass = app_preferences.getString("password", "0");
String Str_check = app_preferences.getString("checked", "no");
if(Str_check.equals("yes"))
{
username.setText(Str_user);
password.setText(Str_pass);
check.setChecked(true);
}
login.setOnClickListener(new View.OnClickListener()
{
public void onClick(View v)
{
name = username.getText().toString();
pass = password.getText().toString();
String Str_check2 = app_preferences.getString("checked", "no");
if(Str_check2.equals("yes"))
{
SharedPreferences.Editor editor = app_preferences.edit();
editor.putString("username", name);
editor.putString("password", pass);
editor.commit();
}
if(name.equals("") || pass.equals(""))
{
Toast.makeText(MainActivity.this, "Blank Field..Please Enter", Toast.LENGTH_LONG).show();
}
else
{
try {
httpclient = new DefaultHttpClient();
httppost = new HttpPost("http://localhost/Purchase Order/userlogin.php");
// Add your data
ArrayList<NameValuePair> nameValuePairs = new ArrayList<NameValuePair>(2);
nameValuePairs.add(new BasicNameValuePair("UserEmail", name.trim()));
nameValuePairs.add(new BasicNameValuePair("Password", pass.trim()));
httppost.setEntity(new UrlEncodedFormEntity(nameValuePairs));
// Execute HTTP Post Request
response = httpclient.execute(httppost);
inputStream = response.getEntity().getContent();
data = new byte[256];
buffer = new StringBuffer();
int len = 0;
while (-1 != (len = inputStream.read(data)) )
{
buffer.append(new String(data, 0, len));
}
inputStream.close();
}
catch (Exception e)
{
Toast.makeText(MainActivity.this, "error"+e.toString(), Toast.LENGTH_LONG).show();
}
if(buffer.charAt(0)=='Y')
{
Toast.makeText(MainActivity.this, "login successfull", Toast.LENGTH_LONG).show();
}
else
{
Toast.makeText(MainActivity.this, "Invalid Username or password", Toast.LENGTH_LONG).show();
}
}
}
});
check.setOnClickListener(new View.OnClickListener()
{
public void onClick(View v)
{
// Perform action on clicks, depending on whether it's now checked
SharedPreferences.Editor editor = app_preferences.edit();
if (((CheckBox) v).isChecked())
{
editor.putString("checked", "yes");
editor.commit();
}
else
{
editor.putString("checked", "no");
editor.commit();
}
}
});
}
public void sendMessage(View view)
{
Intent intent = new Intent(MainActivity.this, Mainpage.class);
startActivity(intent);
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Inflate the menu; this adds items to the action bar if it is present.
getMenuInflater().inflate(R.menu.main, menu);
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
// Handle action bar item clicks here. The action bar will
// automatically handle clicks on the Home/Up button, so long
// as you specify a parent activity in AndroidManifest.xml.
int id = item.getItemId();
if (id == R.id.action_settings) {
return true;
}
return super.onOptionsItemSelected(item);
}
}
activity_main.xml
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools" android:layout_width="match_parent"
android:layout_height="match_parent" android:paddingLeft="@dimen/activity_horizontal_margin"
android:paddingRight="@dimen/activity_horizontal_margin"
android:paddingTop="@dimen/activity_vertical_margin"
android:paddingBottom="@dimen/activity_vertical_margin" tools:context=".MainActivity">
<ImageView
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:id="@+id/imageView"
android:src="@drawable/ic_launcher"
android:layout_below="@+id/textView"
android:layout_centerHorizontal="true" />
<Button
android:id="@+id/button"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_below="@+id/editText2"
android:layout_centerHorizontal="true"
android:onClick="sendMessage"
android:layout_marginTop="32dp"
android:layout_toLeftOf="@+id/textview"
android:layout_toStartOf="@+id/textview"
android:text="login" />
<EditText
android:id="@+id/editText1"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_alignParentEnd="true"
android:layout_alignParentRight="true"
android:layout_alignParentStart="true"
android:layout_alignParentLeft="true"
android:layout_below="@+id/imageView"
android:layout_marginTop="33dp"
android:ems="10"
android:focusable="true"
android:hint="Enter Name"
android:textColorHighlight="#ff7eff15"
android:textColorHint="#ffff25e6" />
<EditText
android:id="@+id/editText2"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_alignEnd="@+id/editText"
android:layout_alignLeft="@+id/editText1"
android:layout_alignRight="@+id/editText"
android:layout_alignParentStart="true"
android:layout_alignParentLeft="true"
android:layout_below="@+id/editText1"
android:layout_marginTop="29dp"
android:ems="10"
android:hint="Password"
android:inputType="textPassword"
android:textColorHint="#ffff299f" />
<CheckBox
android:id="@+id/check"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_alignLeft="@+id/editText2"
android:layout_below="@+id/button"
android:text="Remember me" />
</RelativeLayout>
userlogin.php
<?php
require_once('Purchase Order/dao/connectDB');
mysql_select_db($database_localhost,$localhost);
$username = $_POST['UserEmail'];
$password = $_POST['Password'];
$query_search = "select * from user_login where username = '".$username."' AND password = '".$password. "'";
$query_exec = mysql_query($query_search) or die(mysql_error());
$rows = mysql_num_rows($query_exec);
if($rows --> 0) {
echo "Y";
}
else {
echo "N";
}
?>
LogCat:
06-03 17:56:38.044: D/AndroidRuntime(416): Shutting down VM
06-03 17:56:38.044: W/dalvikvm(416): threadid=1: thread exiting with uncaught exception (group=0x4001d800)
06-03 17:56:38.063: E/AndroidRuntime(416): FATAL EXCEPTION: main
06-03 17:56:38.063: E/AndroidRuntime(416): java.lang.NullPointerException
06-03 17:56:38.063: E/AndroidRuntime(416): at com.example.validation.MainActivity$1.onClick(MainActivity.java:110)
06-03 17:56:38.063: E/AndroidRuntime(416): at android.view.View.performClick(View.java:2408)
06-03 17:56:38.063: E/AndroidRuntime(416): at android.view.View$PerformClick.run(View.java:8816)
06-03 17:56:38.063: E/AndroidRuntime(416): at android.os.Handler.handleCallback(Handler.java:587)
06-03 17:56:38.063: E/AndroidRuntime(416): at android.os.Handler.dispatchMessage(Handler.java:92)
06-03 17:56:38.063: E/AndroidRuntime(416): at android.os.Looper.loop(Looper.java:123)
06-03 17:56:38.063: E/AndroidRuntime(416): at android.app.ActivityThread.main(ActivityThread.java:4627)
06-03 17:56:38.063: E/AndroidRuntime(416): at java.lang.reflect.Method.invokeNative(Native Method)
06-03 17:56:38.063: E/AndroidRuntime(416): at java.lang.reflect.Method.invoke(Method.java:521)
06-03 17:56:38.063: E/AndroidRuntime(416): at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:868)
06-03 17:56:38.063: E/AndroidRuntime(416): at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:626)
06-03 17:56:38.063: E/AndroidRuntime(416): at dalvik.system.NativeStart.main(Native Method)
A:
Assuming that the following line is at line 110 of your file. (But lines have shifted after you posted the source code, I think)
if(buffer.charAt(0)=='Y')
The NullPointerException means that the buffer is still null when this point is reached.
To fix this bug quickly, change it to this:
if((buffer != null) && buffer.charAt(0)=='Y')
|
[
"stackoverflow",
"0000852135.txt"
] | Q:
How do I write normal Tilde(~) in C#?
If I add a Tilde("~") in the text property of a label,the label doesn't display it,instead it displays an upper tilde.
How do I write a normal tilde like the one in the brackets ("~")?
A:
Isn't that a font issue? Set the font to Courier New and test...
|
[
"stackoverflow",
"0008532088.txt"
] | Q:
Sybase Table Update Disable
I just want to do inserts/deletes and don't allow any update.
Is it possible to disable updates on a table?
If so, how?
A:
-- Because it's friday ...
REVOKE UPDATE ON hair TO pubic;
|
[
"gis.stackexchange",
"0000014556.txt"
] | Q:
Where can I get (USA) county neighbour relationship data?
I need a graph where each node is a US county and each edge represents a border shared between counties. I do not particularly care about the absolute position or shape of each county (though that would be a plus).
Where can I find that information (for free)? Army Corp of Engineers? US Geo Survey?
Ideally, I could just get a csv list something like
FL-Polk, FL-Lake
FL-Polk, FL-Orange
FL-Polk, FL-Osceola
etc for every county, including "kitty-corner" borders, borders across water, or across state borders
I'm a programmer so expect I can handle any standard exchange format. (That could just be hubris though). (I don't have any cool GIS apps. Just humble scripting in perl and python.)
A:
You could find a topological representation of county boundaries, such as the CTA boundary files (another source would be the OpenStreetMap, but that's not as complete) and then pick out all distinct pairs of county IDs on opposing sides of boundary lines - e.g. in the CTA files, there's these two fields in the link entity (as described in the documentation):
(10) county FIPS left I6 48-53
(11) county FIPS right I6 54-59
EDIT: After request, here's the details on how to get the data:
To get all county codes, go to the page linked above, click on the 'List of county codes'.
To read the adjacent IDs, there's two options:
download the shapefile ("GO" button next to Download SCUL), open the zip and read the .dbf file (there's loads of programs that open it, e.g. Excel, and the file is basically fixed-width-row ASCII table, so you should be able to extract the last two columns easily)
or download the native format (i'd first try to see if the state boundaries contains the counties as well) open the .llr file, split it by lines (line ends with CR+LF = 0D 0A) and extract the ids from each line: left FIPS is in chars 48-53; right FIPS is in chars 54-59 (index of the first character in a line is 1)
A:
UPDATE: Added spatial index to improve performance and brief instructions for using this script on Windows.
#-------------------------------------------------------------------------------
# This script will build an adjacency table in csv format representing
# county polygons that "neighbor" each other. This script is intended
# to illustrate the use of Python, OGR, Shapely, and Rtree.
#
# County shapefile used in this example: http://cta.ornl.gov/transnet/scuov.zip
#
# You will need Shapely, GDAL/OGR, and Rtree to run this. The easiest way for
# Windows users to obtain these dependencies is to download the OSGeo4W
# installer[1] and run an advanced install to retreive (located under Libs):
#
# gdal-python (1.8.0)
# libspatialindex (1.5.0-1, not 1.6.1)
#
# From the OSGeo4W shell, install setuptools via ez_setup.py[2], and then run
# these commands:
#
# python -m easy_install shapely
# python -m easy_install rtree
#
# This script can then be run from the OSGeo4W shell.
#
# [1] http://trac.osgeo.org/osgeo4w
# [2] http://trac.osgeo.org/osgeo4w/wiki/TracPlugins
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import sys, cPickle
from rtree import Rtree
from osgeo import ogr
from shapely import wkb
class FastRtree(Rtree):
def dumps(self, obj):
return cPickle.dumps(obj, -1)
def generator_function(list):
for i, obj in enumerate(list):
yield (i, obj[0].bounds, obj)
def main():
csv = r'C:\county_adjacency.csv' # Path to output csv, modify as needed
shapefile = r'C:\scuo.shp' # Path to existing shapefile, modify as needed
fipsindex = 3 # Field index for FIPS code
statenameindex = 0 # Field index for state name
countynameindex = 1 # Field index for county name
driver = ogr.GetDriverByName('ESRI Shapefile')
dataset = driver.Open(shapefile)
if dataset is None:
print 'Open failed.'
sys.exit(1)
lyr = dataset.GetLayerByName('scuo') # Modify as needed
lyr.ResetReading()
countylist = []
for feat in lyr:
geom = feat.GetGeometryRef()
if geom is not None and (geom.GetGeometryType() == ogr.wkbPolygon or geom.GetGeometryType() == ogr.wkbMultiPolygon):
countytuple = wkb.loads(geom.ExportToWkb()), \
feat.GetFieldAsString(fipsindex), \
feat.GetFieldAsString(countynameindex), \
feat.GetFieldAsString(statenameindex)
countylist.append(countytuple)
else:
print 'Error reading polygon geometry for: ' + \
feat.GetFieldAsString(fipsindex) + ', ' + \
feat.GetFieldAsString(countynameindex) + ', ' + \
feat.GetFieldAsString(statenameindex)
idx = FastRtree(generator_function(countylist))
csvfile = open(csv, 'w')
csvfile.write('\"fips1\",\"county1\",\"state1\",\"fips2\",\"county2\",\"state2\"' + '\n')
for i in countylist:
for j in list(idx.intersection(i[0].bounds, objects='raw')):
try:
if i[0].touches(j[0]):
csvfile.write('\"' + \
i[1] + '\",\"' + \
i[2] + '\",\"' + \
i[3] + '\",\"' + \
j[1] + '\",\"' + \
j[2] + '\",\"' + \
j[3] + '\"\n')
except:
print 'Failed to evaluate: ' + \
i[1] + ',' + \
i[2] + ',' + \
i[3] + \
' with ' + \
j[1] + ',' + \
j[2] + ',' + \
j[3]
csvfile.close()
dataset = None
if __name__ == '__main__':
main()
A:
Since you mention you are a programmer, here's some code that works with arcgis 10. Update: If you don't feel like programming, I've posted a zipped shapefile of the graph here.
public void TestGetNeighbors()
{
var fLayer = ArcMap.Document.FocusMap.get_Layer(0) as IFeatureLayer;
var dict = GetNeighborsByName((ITopologyClass)fLayer.FeatureClass, "{0} {1}", "Name", "State_Name");
foreach (KeyValuePair<string, List<string>> kvp in dict)
{
Debug.WriteLine(kvp.Key);
foreach (string neighbor in kvp.Value)
Debug.WriteLine("\t" + neighbor);
}
}
private Dictionary<string, List<string>> GetNeighborsByName(ITopologyClass topoClass, string format, params object[] fldNames)
{
var fc = topoClass as IFeatureClass;
if (topoClass.Topology.Cache.BuildExtent == null || topoClass.Topology.Cache.BuildExtent.IsEmpty)
{
Debug.WriteLine("building ...");
topoClass.Topology.Cache.Build(((IGeoDataset)fc).Extent, false);
}
// get neighbors by oid
var oidDict = GetNeighborsByOid(topoClass);
// use the full names to build the output dictionary
var nameDict = GetFullNames(fc, format, fldNames);
var outDict = new Dictionary<string, List<string>>();
foreach (KeyValuePair<int, List<int>> kvp in oidDict)
{
var list = new List<string>();
foreach (int oid in kvp.Value)
list.Add(nameDict[oid]);
outDict.Add(nameDict[kvp.Key], list);
}
return outDict;
}
private Dictionary<int, List<int>> GetNeighborsByOid(ITopologyClass topoClass)
{
var outDict = new Dictionary<int, List<int>>();
IFeatureCursor fCur = ((IFeatureClass)topoClass).Search(null, false);
try
{
IFeature feat = null;
while ((feat = fCur.NextFeature()) != null)
{
var neighbors = GetNeighboringFeatureOids(topoClass.Topology.Cache,(IFeatureClass)topoClass , feat.OID);
outDict.Add(feat.OID, neighbors);
}
}
catch
{
throw;
}
finally
{
if(fCur != null)
System.Runtime.InteropServices.Marshal.FinalReleaseComObject(fCur);
}
return outDict;
}
private List<int> GetNeighboringFeatureOids(ITopologyGraph topoGraph, IFeatureClass fc, int oid)
{
var outList = new List<int>();
var enumEdge = topoGraph.GetParentEdges(fc, oid);
for (int i = 0; i < enumEdge.Count; i++)
{
var edge = enumEdge.Next();
outList.AddRange(GetParentOids(edge.get_LeftParents(true),fc));
outList.AddRange(GetParentOids(edge.get_RightParents(true),fc));
}
// handle the kitty-corner case
var enumNode = topoGraph.GetParentNodes(fc, oid);
for(int i=0; i<enumNode.Count;i++)
{
var node = enumNode.Next();
var list = GetParentOids(node.Parents, fc);
foreach (int neighborOid in list)
{
if (!outList.Contains(neighborOid))
outList.Add(neighborOid); // kitty corner
}
}
return Reduce(outList,oid);
}
private List<int> Reduce(List<int> inList, int omit)
{
var outList = new List<int>();
foreach (int i in inList)
{
if (!(outList.Contains(i) || omit == i))
outList.Add(i);
}
return outList;
}
private List<int> GetParentOids(IEnumTopologyParent enumParent, IFeatureClass fc)
{
var outList = new List<int>();
for (int i = 0; i < enumParent.Count; i++)
{
var parent = enumParent.Next();
if (parent.m_pFC == fc)
outList.Add(parent.m_FID);
}
return outList;
}
private Dictionary<int, string> GetFullNames(IFeatureClass fc, string format, params object[] fldNames)
{
var fldList = GetFieldIndexes(fc, fldNames);
var outDict = new Dictionary<int, string>();
IFeatureCursor fCur = fc.Search(null, false);
IFeature feat = null;
try
{
while ((feat = fCur.NextFeature()) != null)
{
var valList = new List<string>();
foreach (int idx in fldList)
{
string val = feat.get_Value(idx) is DBNull ? "" : feat.get_Value(idx).ToString();
valList.Add(val);
}
outDict.Add(feat.OID, String.Format(format, valList.ToArray()));
}
}
catch
{
throw;
}
finally
{
if (fCur != null)
System.Runtime.InteropServices.Marshal.FinalReleaseComObject(fCur);
}
return outDict;
}
private static List<int> GetFieldIndexes(IFeatureClass fc, object[] fldNames)
{
var fldList = new List<int>();
foreach (string fldName in fldNames)
{
int idx = fc.FindField(fldName);
if (idx == -1)
throw new Exception(string.Format("field {0} not found on {1}", fldName, fc.AliasName));
fldList.Add(idx);
}
return fldList;
}
It produces output like this:
Yukon-Koyukuk Alaska
North Slope Alaska
Northwest Arctic Alaska
Southeast Fairbanks Alaska
Nome Alaska
Fairbanks North Star Alaska
Denali Alaska
Wade Hampton Alaska
Matanuska-Susitna Alaska
Bethel Alaska
Southeast Fairbanks Alaska
Yukon-Koyukuk Alaska
Fairbanks North Star Alaska
Denali Alaska
Matanuska-Susitna Alaska
Valdez-Cordova Alaska ...
|
[
"stackoverflow",
"0030201374.txt"
] | Q:
Calling Stored Procedure from MSAccess fails, but running the exact same command from Sql Server Manager works
I have an Access DB that has a bunch of linked tables from a SQL Server database. The Access DB calls a stored procedure on the SQL Server database that updates data on a form.
Dim sql As String
Dim cnn As ADODB.Connection
Set cnn = New ADODB.Connection
cnn.ConnectionString = "DSN=Records"
cnn.CommandTimeout = 90
cnn.Open
sql = "exec myStoredProcedure @param1=" & Me.txtParam1Field & ", @param2=" & Me.txtParam2Field
cnn.Execute sql
Set cnn = Nothing
frmMyForm.Requery
When I run this it either times out, if the CommandTimeout value isn't long enough, or it executes, but doesn't actually execute myStoredProcedure for some reason. If I take the string sql and paste it into Sql Server Manager, myStoredProcedure executes in less than a second and everything works great.
I've tried debugging over this code in Access, but I'm not getting any useful results when I step over cnn.Execute sql.
A:
Depending on the values of txtParam1Field and txtParam2Field you probably want to enclose the values with single quote like so:
sql = "exec myStoredProcedure @param1='" & Me.txtParam1Field & "', @param2='" & Me.txtParam2Field & "'"
If we take your original code and assume that txtParam1Field is equal to 1 and txtParam2Field is equal to John then your generated sql will not execute because it will look like this:
exec myStoredProcedure @param1=1, @param2=John
Your best bet is to output the value of "sql" variable in debug window and run that exact statement in sql query manager. That will tell you exactly where the problem is if it's malformed SQL.
|
[
"stackoverflow",
"0043543028.txt"
] | Q:
file_get_contents but no load
I wrote some PHP code to get the first m3u8 link from the "hlsUrl" tag of this page by using file_get_contents, but it's not working. Any help, please?
my code is
> <?php
>
> $link =
> "https://stream.live/-/streams/users/kawkaw?includeTrending=true" ;
>
> $actualLink = file_get_contents($link);
> echo $actualLink->find("hlsUrl");
>
> ?>
A:
Your file_get_contents will return string JSON. So you need to parse it first.
$link = "https://stream.live/-/streams/users/kawkaw?includeTrending=true";
$actualLink = file_get_contents($link);
$actualJson = json_decode($actualLink);
echo $actualJson->trendingStreams[0]->hlsUrl;
|
[
"math.stackexchange",
"0003166373.txt"
] | Q:
Prove that three vectors are coplanar
Three vectors are given: $u,v,w$. It is given that:
$|u|=|v|=|w|= \sqrt{2}$; $u\cdot v=u\cdot w=v\cdot w=-1$.
Prove that vectors $u,v,w$ are coplanar (on the same plane).
I have a few ideas, but I don't know if they are helpful in this case:
I know that three vectors are co-planar if $u\cdot(v x w)=0$.
In addition, I assume that you can prove it with linear dependence, but I don't know how to use it here.
In addition, I thought that maybe the angle between the vectors can be of help — $120$ degrees between every $2$ vectors — but does that necessarily mean that they are on the same plane, i.e. coplanar?
A:
The sum of three angles formed by three non-coplanar vectors is always less than 360 degrees. However instead of proving this general statement one can arrive at the required result immediately, observing that the vectors in the problem form an equilateral triangle. An easy check shows:
$$(u+v+w)\cdot(u+v+w)=|u|^2+|v|^2+|w|^2+2u\cdot v+2v\cdot w+2w\cdot u=0\\
\implies u+v+w=0.$$
As the three vectors are linearly dependent, they are coplanar.
|
[
"stackoverflow",
"0003313886.txt"
] | Q:
A name was started with an invalid character. Error processing resource
Here is the exact error I'm getting when I try to launch my default.aspx file from the published folder. Can anybody point me in the right direction?
The XML page cannot be displayed
Cannot view XML input using XSL style sheet. Please correct the error and then click the Refresh button, or try again later.
--------------------------------------------------------------------------------
A name was started with an invalid character. Error processing resource 'file:///C:/inetpub/wwwroot/MHNProServices/Default....
<%@ Page Title="" Language="C#" MasterPageFile="~/ProServices.Master" AutoEventWireup="true" CodeBehind="Default.aspx.cs"...
A:
This issue was caused because I needed to run the website from an IIS server, instead of launching it directly from the folder I published it to. Oops.
|
[
"stackoverflow",
"0010233142.txt"
] | Q:
Set Visual Studio to create mdf file that SQL Server 2005 supports
I am using Visual Studio 2010 and I added service–based database (.mdf file ) to my project.
The issue is that in the deployment server we have only SQL Server 2005 BUT SQL Server 2005 doesn’t support the .mdf file that Visual Studio 2010 created.
I can’t change the SQL Server on the deployment server, so my question is: is there any way to tell Visual Studio to create an .mdf file that SQL Server 2005 supports?
If so - what will be the impact to my code
A:
You can always export the SQL script of your database out of any server version and execute the script on another (including lower) version of the server.
Since the script can contain both metadata and actual data, this option should make your database compatible with SQL 2005 even if you use a newer version on the development machine.
|
[
"stackoverflow",
"0025114093.txt"
] | Q:
How to use takeWhile in python
I'm not getting the right answer but I cant figure out where is my problem
def GC_content(genetic_string):
"""
:param genetic_string:
:return: GC content in percentage
:testmod
>>> GC_content("CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGACTGGGAACCTGCGGGCAGTAGGTGGAAT")
60.919540
"""
counter = 0.0
for index in takewhile(lambda x: x is "C" or x is "G", list(genetic_string)):
counter += 1.0
return counter/float(len(genetic_string))
A:
You seem to be looking for:
s = list(genetic_string)
return (s.count('G') + s.count('C')) / float(len(s)) * 100
takewhile only takes characters from the beginning of the iterable (string) as long as the given condition holds but you wish to be looking at the whole string. In other words, takewhile isn't very suitable if you wish to come up with that 60.919540 result.
|
[
"stackoverflow",
"0009437940.txt"
] | Q:
What is the point of the @2x for Retina display apps?
I understand that the Retina display has 2x as many pixels as the non-Retina displays, but what is the difference between using the @2x version and taking the 512 x 512 image and constraining it via the size of the frame?
To Clarify:
If I have a button that is 72 x 72, the proper way to display that on an iPhone is to have a
image.png = 72x72
[email protected] = 144 x 144 <---Fixed :) TY
But why not just use 1 image:
image.png = 512x512
and do something like this:
UIImageView *myImage = [[UIImageView alloc] init ];
[myImage setImage:[UIImage imageNamed:@"image.png"]];
[myImage setFrame:CGRectMake(50, 50, 72, 72)];
I am sure there is a good reason, I just don't know what it is, other than possibly a smaller app file size?
Thanks for the education!
A:
There are several good reasons for sizing your images correctly, but the main one would have to be image clarity: When resizing images, you often end up w/ artifacts that make a picture look muddy or pixelated. By creating the images at the correct size, you'll know exactly what the end user will see on his or her screen.
Another reason would simply be to cut down on the overall file size of your binary: a 16x16 icon takes up orders of magnitude fewer bytes than a 512x512 image.
And if you need a third reason: Convenience methods such as [UIImage imageWithName:@"xxxx"] produce images of actual size and usually do not need additional frame/bounds code to go along with them. If you know the size, you can save yourself a lot of headache.
|
[
"stackoverflow",
"0016392107.txt"
] | Q:
RVM and Ruby 2.0.0 under Ubuntu 12.04
When trying to install Ruby using RVM under my Ubuntu 12.04 (with rvm install 2.0.0), I get the following:
No binary rubies available for: ubuntu/12.10/i386/ruby-2.0.0-p0.
And it tries to fetch repositories but it doesn't install anything. Shall I add some repositories or what should I do?
I also have tried running rvm requirements but no luck either.
What I get after running rvm install 2.0.0 is:
Searching for binary rubies, this might take some time.
No binary rubies available for: ubuntu/12.10/i386/ruby-2.0.0-p0.
Continuing with compilation. Please read 'rvm mount' to get more information on binary rubies.
Hit http://archive.ubuntu.com quantal Release.gpg
Hit http://dl.google.com stable Release.gpg
Get:1 http://extras.ubuntu.com quantal Release.gpg [72 B]
Ign http://ppa.launchpad.net quantal Release.gpg
Hit http://archive.canonical.com quantal Release.gpg
Hit http://repository.spotify.com stable Release.gpg
Hit http://archive.ubuntu.com quantal-updates Release.gpg
Hit http://extras.ubuntu.com quantal Release
Hit http://dl.google.com stable Release.gpg
Hit http://ppa.launchpad.net quantal Release.gpg
Hit http://archive.canonical.com quantal Release
Hit http://repository.spotify.com stable Release
Get:2 http://archive.ubuntu.com quantal-backports Release.gpg [933 B]
Hit http://downloads-distro.mongodb.org dist Release.gpg
Hit http://extras.ubuntu.com quantal/main Sources
Ign http://ppa.launchpad.net quantal Release
Hit http://dl.google.com stable Release
Hit http://archive.canonical.com quantal/partner i386 Packages
Hit http://repository.spotify.com stable/non-free Sources
Hit http://archive.ubuntu.com quantal-security Release.gpg
Hit http://extras.ubuntu.com quantal/main i386 Packages
Hit http://ppa.launchpad.net quantal Release
Hit http://dl.google.com stable Release
Hit http://repository.spotify.com stable/non-free i386 Packages
Hit http://archive.ubuntu.com quantal Release
Hit http://dl.google.com stable/main i386 Packages
Hit http://archive.ubuntu.com quantal-updates Release
Get:3 http://linux.dropbox.com precise Release.gpg [489 B]
Get:4 http://archive.ubuntu.com quantal-backports Release [49.6 kB]
Hit http://downloads-distro.mongodb.org dist Release
Hit http://toolbelt.heroku.com ./ Release.gpg
Hit http://ppa.launchpad.net quantal/main Sources
Hit http://dl.google.com stable/main i386 Packages
Get:5 http://linux.dropbox.com precise Release [2,603 B]
Hit http://archive.ubuntu.com quantal-security Release
Hit http://ppa.launchpad.net quantal/main i386 Packages
Hit http://archive.ubuntu.com quantal/main Sources
Hit http://downloads-distro.mongodb.org dist/10gen i386 Packages
Hit http://archive.ubuntu.com quantal/restricted Sources
Ign http://archive.canonical.com quantal/partner Translation-en_US
Get:6 http://linux.dropbox.com precise/main i386 Packages [1,148 B]
Hit http://toolbelt.heroku.com ./ Release
Ign http://extras.ubuntu.com quantal/main Translation-en_US
Hit http://archive.ubuntu.com quantal/universe Sources
Ign http://archive.canonical.com quantal/partner Translation-en
Ign http://extras.ubuntu.com quantal/main Translation-en
Hit http://archive.ubuntu.com quantal/multiverse Sources
Ign http://repository.spotify.com stable/non-free Translation-en_US
Hit http://archive.ubuntu.com quantal/main i386 Packages
Ign http://repository.spotify.com stable/non-free Translation-en
Hit http://archive.ubuntu.com quantal/restricted i386 Packages
Hit http://archive.ubuntu.com quantal/universe i386 Packages
Hit http://toolbelt.heroku.com ./ Packages
Hit http://archive.ubuntu.com quantal/multiverse i386 Packages
Hit http://archive.ubuntu.com quantal/main Translation-en
Hit http://archive.ubuntu.com quantal/multiverse Translation-en
Hit http://archive.ubuntu.com quantal/restricted Translation-en
Hit http://archive.ubuntu.com quantal/universe Translation-en
Ign http://dl.google.com stable/main Translation-en_US
Hit http://archive.ubuntu.com quantal-updates/main Sources
Ign http://dl.google.com stable/main Translation-en
Hit http://archive.ubuntu.com quantal-updates/restricted Sources
Hit http://archive.ubuntu.com quantal-updates/universe Sources
Ign http://dl.google.com stable/main Translation-en_US
Hit http://archive.ubuntu.com quantal-updates/multiverse Sources
Ign http://dl.google.com stable/main Translation-en
Hit http://archive.ubuntu.com quantal-updates/main i386 Packages
Err http://ppa.launchpad.net quantal/main Sources
404 Not Found
Hit http://archive.ubuntu.com quantal-updates/restricted i386 Packages
Err http://ppa.launchpad.net quantal/main i386 Packages
404 Not Found
Hit http://archive.ubuntu.com quantal-updates/universe i386 Packages
Ign http://ppa.launchpad.net quantal/main Translation-en_US
Hit http://archive.ubuntu.com quantal-updates/multiverse i386 Packages
Ign http://ppa.launchpad.net quantal/main Translation-en
Ign http://ppa.launchpad.net quantal/main Translation-en_US
Ign http://ppa.launchpad.net quantal/main Translation-en
Ign http://toolbelt.heroku.com ./ Translation-en_US
Hit http://archive.ubuntu.com quantal-updates/main Translation-en
Ign http://linux.dropbox.com precise/main Translation-en_US
Hit http://archive.ubuntu.com quantal-updates/multiverse Translation-en
Hit http://archive.ubuntu.com quantal-updates/restricted Translation-en
Ign http://linux.dropbox.com precise/main Translation-en
Hit http://archive.ubuntu.com quantal-updates/universe Translation-en
Get:7 http://archive.ubuntu.com quantal-backports/main Sources [14 B]
Get:8 http://archive.ubuntu.com quantal-backports/restricted Sources [14 B]
Ign http://downloads-distro.mongodb.org dist/10gen Translation-en_US
Get:9 http://archive.ubuntu.com quantal-backports/universe Sources [13.7 kB]
Ign http://toolbelt.heroku.com ./ Translation-en
Get:10 http://archive.ubuntu.com quantal-backports/multiverse Sources [1,306 B]
Get:11 http://archive.ubuntu.com quantal-backports/main i386 Packages [14 B]
Get:12 http://archive.ubuntu.com quantal-backports/restricted i386 Packages [14 B]
Ign http://downloads-distro.mongodb.org dist/10gen Translation-en
Get:13 http://archive.ubuntu.com quantal-backports/universe i386 Packages [18.1 kB]
Get:14 http://archive.ubuntu.com quantal-backports/multiverse i386 Packages [1,434 B]
Hit http://archive.ubuntu.com quantal-backports/main Translation-en
Hit http://archive.ubuntu.com quantal-backports/multiverse Translation-en
Hit http://archive.ubuntu.com quantal-backports/restricted Translation-en
Hit http://archive.ubuntu.com quantal-backports/universe Translation-en
Hit http://archive.ubuntu.com quantal-security/main Sources
Hit http://archive.ubuntu.com quantal-security/restricted Sources
Hit http://archive.ubuntu.com quantal-security/universe Sources
Hit http://archive.ubuntu.com quantal-security/multiverse Sources
Hit http://archive.ubuntu.com quantal-security/main i386 Packages
Hit http://archive.ubuntu.com quantal-security/restricted i386 Packages
Hit http://archive.ubuntu.com quantal-security/universe i386 Packages
Hit http://archive.ubuntu.com quantal-security/multiverse i386 Packages
Hit http://archive.ubuntu.com quantal-security/main Translation-en
Hit http://archive.ubuntu.com quantal-security/multiverse Translation-en
Hit http://archive.ubuntu.com quantal-security/restricted Translation-en
Hit http://archive.ubuntu.com quantal-security/universe Translation-en
Ign http://archive.ubuntu.com quantal/main Translation-en_US
Ign http://archive.ubuntu.com quantal/multiverse Translation-en_US
Ign http://archive.ubuntu.com quantal/restricted Translation-en_US
Ign http://archive.ubuntu.com quantal/universe Translation-en_US
Ign http://archive.ubuntu.com quantal-updates/main Translation-en_US
Ign http://archive.ubuntu.com quantal-updates/multiverse Translation-en_US
Ign http://archive.ubuntu.com quantal-updates/restricted Translation-en_US
Ign http://archive.ubuntu.com quantal-updates/universe Translation-en_US
Ign http://archive.ubuntu.com quantal-backports/main Translation-en_US
Ign http://archive.ubuntu.com quantal-backports/multiverse Translation-en_US
Ign http://archive.ubuntu.com quantal-backports/restricted Translation-en_US
Ign http://archive.ubuntu.com quantal-backports/universe Translation-en_US
Ign http://archive.ubuntu.com quantal-security/main Translation-en_US
Ign http://archive.ubuntu.com quantal-security/multiverse Translation-en_US
Ign http://archive.ubuntu.com quantal-security/restricted Translation-en_US
Ign http://archive.ubuntu.com quantal-security/universe Translation-en_US
Fetched 89.4 kB in 11s (7,694 B/s)
W: Failed to fetch http://ppa.launchpad.net/mixxx/mixxx/ubuntu/dists/quantal/main/source/Sources 404 Not Found
W: Failed to fetch http://ppa.launchpad.net/mixxx/mixxx/ubuntu/dists/quantal/main/binary-i386/Packages 404 Not Found
E: Some index files failed to download. They have been ignored, or old ones used instead.
A:
The errors are irrelevant to you installing ruby - you have some broken dependencies (whatever the PPA for mixxx/mixxx is). Remove it from your software sources, and try again.
|
[
"stackoverflow",
"0046471663.txt"
] | Q:
createObject("Excel.Application.11") not working
Set XlApp = createObject("Excel.Application.11","Localhost") does not work for Office 2016. Please let me know what the last number should be for Office 2016.
regards
anna
A:
I couldn't test it, but this should work for Office 2016:
Set XlApp = createObject("Excel.Application.16","Localhost").
See also Active X Error With Excel 2016 And Late Binding
|
[
"stackoverflow",
"0042266392.txt"
] | Q:
Unable to create and send dynamic CSV file as attachment over mail
I am trying to create and send a dynamic CSV file as an attachment over mail, but it is not working. I am able to send mail with an attachment, but the file arrives empty. Below is my code:
var notProcessedRecords = FileFTPData.Where(x => x.LastName.StartsWith("9999")).ToList();
string loggedInUserName = "Administrator";
try
{
MemoryStream stream = new MemoryStream();
TextWriter writer = new StreamWriter(stream, Encoding.Default);
StringBuilder sb = new StringBuilder();
sb.AppendFormat(string.Format("{0}, {1}, {2}, {3}, {4}, {5} ", "FIRSTNAME", "LASTNAME", "USERID", "COMPANYNAME", "EMAIL", "PHONE"));
foreach (var item in notProcessedRecords)
{
sb.AppendLine();
sb.AppendFormat(string.Format("{0}, {1}, {2}, {3}, {4}, {5} ", item.FirstName, item.LastName, item.UserId, item.CompanyName, item.Email, item.Phone));
}
writer.WriteLine(sb);
MailMessage mail = new MailMessage();
SmtpClient SmtpServer = new SmtpClient("smtp.gmail.com");
mail.From = new MailAddress("******@gmail.com");
mail.To.Add("*******@yahoo.com");
mail.Subject = "FTP file processing status";
mail.Body = "<div>Hello " + loggedInUserName + ", </br></br> Following item are restricted from processing, due to some errors. Please check Process description for the same. </br></br> From, </br>Streben Support </div>";
mail.IsBodyHtml = true;
Attachment attachment = new Attachment(stream, new ContentType("text/csv"));
attachment.Name = "test.csv";
mail.Attachments.Add(attachment);
SmtpServer.Port = 587;
SmtpServer.Credentials = new System.Net.NetworkCredential("******@gmail.com", "******");
SmtpServer.EnableSsl = true;
SmtpServer.Send(mail);
}
catch(Exception ex)
{
}
Can anybody help me in correcting the issue?
A:
As you have just finished writing to the stream the Position property will be set to the end of the stream. Additionally, there may still be some data buffered in the writer that hasn't been flushed to the stream. Because of this, when the attachment is created from the stream it will appear to be empty. To fix this do:
// Done writing here //
writer.Flush();
stream.Position = 0;
// Create attachment here //
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.