response | instruction
---|---|
To be expansive, we include audit log entries for events that
either modified the target user or in which the target user modified
something (e.g., if they changed the settings for a stream). | def custom_fetch_realm_audit_logs_for_user(response: TableData, context: Context) -> None:
"""To be expansive, we include audit log entries for events that
either modified the target user or where the target user modified
something (E.g. if they changed the settings for a stream).
"""
user = context["user"]
query = RealmAuditLog.objects.filter(Q(modified_user_id=user.id) | Q(acting_user_id=user.id))
rows = make_raw(list(query))
response["zerver_realmauditlog"] = rows |
Simple custom fetch function to fetch only the ScheduledMessage objects that we're allowed to export. | def custom_fetch_scheduled_messages(response: TableData, context: Context) -> None:
"""
Simple custom fetch function to fetch only the ScheduledMessage objects that we're allowed to export.
"""
realm = context["realm"]
exportable_scheduled_message_ids = context["exportable_scheduled_message_ids"]
query = ScheduledMessage.objects.filter(realm=realm, id__in=exportable_scheduled_message_ids)
rows = make_raw(list(query))
response["zerver_scheduledmessage"] = rows |
Simple custom fetch function to fix up .acting_user for some RealmAuditLog objects.
Certain RealmAuditLog objects have an acting_user that is in a different .realm, due to
the possibility of server administrators (typically with the .is_staff permission) taking
certain actions to modify UserProfiles or Realms, which will set the .acting_user to
the administrator's UserProfile, which can be in a different realm. Such an acting_user
cannot be imported during organization import on another server, so we just set it
to None. | def custom_fetch_realm_audit_logs_for_realm(response: TableData, context: Context) -> None:
"""
Simple custom fetch function to fix up .acting_user for some RealmAuditLog objects.
Certain RealmAuditLog objects have an acting_user that is in a different .realm, due to
the possibility of server administrators (typically with the .is_staff permission) taking
certain actions to modify UserProfiles or Realms, which will set the .acting_user to
the administrator's UserProfile, which can be in a different realm. Such an acting_user
cannot be imported during organization import on another server, so we just set it
to None.
"""
realm = context["realm"]
query = RealmAuditLog.objects.filter(realm=realm).select_related("acting_user")
realmauditlog_objects = list(query)
for realmauditlog in realmauditlog_objects:
if realmauditlog.acting_user is not None and realmauditlog.acting_user.realm_id != realm.id:
realmauditlog.acting_user = None
rows = make_raw(realmauditlog_objects)
response["zerver_realmauditlog"] = rows |
As part of the system for doing parallel exports, this runs on one
batch of Message objects and adds the corresponding UserMessage
objects. (This is called by the export_usermessage_batch
management command).
See write_message_partial_for_query for more context. | def export_usermessages_batch(
input_path: Path, output_path: Path, consent_message_id: Optional[int] = None
) -> None:
"""As part of the system for doing parallel exports, this runs on one
batch of Message objects and adds the corresponding UserMessage
objects. (This is called by the export_usermessage_batch
management command).
See write_message_partial_for_query for more context."""
assert input_path.endswith((".partial", ".locked"))
assert output_path.endswith(".json")
with open(input_path, "rb") as input_file:
input_data: MessagePartial = orjson.loads(input_file.read())
message_ids = {item["id"] for item in input_data["zerver_message"]}
user_profile_ids = set(input_data["zerver_userprofile_ids"])
realm = Realm.objects.get(id=input_data["realm_id"])
zerver_usermessage_data = fetch_usermessages(
realm, message_ids, user_profile_ids, output_path, consent_message_id
)
output_data: TableData = dict(
zerver_message=input_data["zerver_message"],
zerver_usermessage=zerver_usermessage_data,
)
write_table_data(output_path, output_data)
os.unlink(input_path) |
Scheduled messages are private to the sender, so which ones we export depends on the
public/consent/full export mode. | def get_exportable_scheduled_message_ids(
realm: Realm, public_only: bool = False, consent_message_id: Optional[int] = None
) -> Set[int]:
"""
Scheduled messages are private to the sender, so which ones we export depends on the
public/consent/full export mode.
"""
if public_only:
return set()
if consent_message_id:
sender_ids = get_consented_user_ids(consent_message_id)
return set(
ScheduledMessage.objects.filter(sender_id__in=sender_ids, realm=realm).values_list(
"id", flat=True
)
)
return set(ScheduledMessage.objects.filter(realm=realm).values_list("id", flat=True)) |
Use this function if you need a HUGE number of ids from
the database, and you don't mind a few extra trips. Particularly
for exports, we don't really care about a little extra time
to finish the export--the much bigger concern is that we don't
want to overload our database all at once, nor do we want to
keep a whole bunch of Django objects around in memory.
So our general process is to call this function first, and then
we call chunkify to break our ids into small chunks for "fat query"
batches.
Even if you are not working at huge scale, this function can
also be used for the convenience of its API. | def get_id_list_gently_from_database(*, base_query: Any, id_field: str) -> List[int]:
"""
Use this function if you need a HUGE number of ids from
the database, and you don't mind a few extra trips. Particularly
for exports, we don't really care about a little extra time
to finish the export--the much bigger concern is that we don't
want to overload our database all at once, nor do we want to
keep a whole bunch of Django objects around in memory.
So our general process is to call this function first, and then
we call chunkify to break our ids into small chunks for "fat query"
batches.
Even if you are not working at huge scale, this function can
also be used for the convenience of its API.
"""
min_id = -1
all_ids = []
batch_size = 10000 # we are just getting ints
assert id_field == "id" or id_field.endswith("_id")
while True:
filter_args = {f"{id_field}__gt": min_id}
new_ids = list(
base_query.values_list(id_field, flat=True)
.filter(**filter_args)
.order_by(id_field)[:batch_size]
)
if len(new_ids) == 0:
break
all_ids += new_ids
min_id = new_ids[-1]
return all_ids |
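The batching loop above is just keyset pagination on the id column. Here is a minimal, self-contained sketch of the same pattern using sqlite3 instead of Django; the `demo` table and the tiny batch size are invented purely for illustration.

```python
import sqlite3

def fetch_ids_gently(conn: sqlite3.Connection, batch_size: int = 3) -> list[int]:
    """Fetch all ids in small batches, never holding more than one batch of rows."""
    min_id = -1
    all_ids: list[int] = []
    while True:
        rows = conn.execute(
            "SELECT id FROM demo WHERE id > ? ORDER BY id LIMIT ?",
            (min_id, batch_size),
        ).fetchall()
        if not rows:
            break
        new_ids = [row[0] for row in rows]
        all_ids += new_ids
        min_id = new_ids[-1]  # resume after the largest id we have already seen
    return all_ids

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE demo (id INTEGER PRIMARY KEY)")
conn.executemany("INSERT INTO demo (id) VALUES (?)", [(i,) for i in range(1, 11)])
print(fetch_ids_gently(conn))  # [1, 2, ..., 10], fetched three at a time
```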
This function computes page_params for when we load the home page.
The page_params data structure gets sent to the client. | def build_page_params_for_home_page_load(
request: HttpRequest,
user_profile: Optional[UserProfile],
realm: Realm,
insecure_desktop_app: bool,
narrow: List[NarrowTerm],
narrow_stream: Optional[Stream],
narrow_topic_name: Optional[str],
needs_tutorial: bool,
) -> Tuple[int, Dict[str, object]]:
"""
This function computes page_params for when we load the home page.
The page_params data structure gets sent to the client.
"""
client_capabilities = {
"notification_settings_null": True,
"bulk_message_deletion": True,
"user_avatar_url_field_optional": True,
"stream_typing_notifications": True,
"user_settings_object": True,
"linkifier_url_template": True,
"user_list_incomplete": True,
}
if user_profile is not None:
client = RequestNotes.get_notes(request).client
assert client is not None
state_data = do_events_register(
user_profile,
realm,
client,
apply_markdown=True,
client_gravatar=True,
slim_presence=True,
client_capabilities=client_capabilities,
narrow=narrow,
include_streams=False,
)
queue_id = state_data["queue_id"]
default_language = state_data["user_settings"]["default_language"]
else:
# The spectator client will be fetching the /register response
# for spectators via the API.
state_data = None
queue_id = None
default_language = realm.default_language
if user_profile is None:
request_language = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME, default_language)
else:
request_language = get_and_set_request_language(
request,
default_language,
translation.get_language_from_path(request.path_info),
)
furthest_read_time = get_furthest_read_time(user_profile)
two_fa_enabled = settings.TWO_FACTOR_AUTHENTICATION_ENABLED and user_profile is not None
billing_info = get_billing_info(user_profile)
user_permission_info = get_user_permission_info(user_profile)
# Pass parameters to the client-side JavaScript code.
# These end up in a JavaScript Object named 'page_params'.
#
# Sync this with home_params_schema in base_page_params.ts.
page_params: Dict[str, object] = dict(
page_type="home",
## Server settings.
test_suite=settings.TEST_SUITE,
insecure_desktop_app=insecure_desktop_app,
login_page=settings.HOME_NOT_LOGGED_IN,
warn_no_email=settings.WARN_NO_EMAIL,
# Only show marketing email settings if on Zulip Cloud
corporate_enabled=settings.CORPORATE_ENABLED,
## Misc. extra data.
language_list=get_language_list(),
needs_tutorial=needs_tutorial,
furthest_read_time=furthest_read_time,
bot_types=get_bot_types(user_profile),
two_fa_enabled=two_fa_enabled,
apps_page_url=get_apps_page_url(),
show_billing=billing_info.show_billing,
show_remote_billing=billing_info.show_remote_billing,
promote_sponsoring_zulip=promote_sponsoring_zulip_in_realm(realm),
show_plans=billing_info.show_plans,
sponsorship_pending=billing_info.sponsorship_pending,
show_webathena=user_permission_info.show_webathena,
# Adding two_fa_enabled as condition saves us 3 queries when
# 2FA is not enabled.
two_fa_enabled_user=two_fa_enabled and bool(default_device(user_profile)),
is_spectator=user_profile is None,
# There is no event queue for spectators since
# events support for spectators is not implemented yet.
no_event_queue=user_profile is None,
server_sentry_dsn=settings.SENTRY_FRONTEND_DSN,
)
if settings.SENTRY_FRONTEND_DSN is not None:
page_params["realm_sentry_key"] = realm.string_id
page_params["server_sentry_environment"] = get_config(
"machine", "deploy_type", "development"
)
page_params["server_sentry_sample_rate"] = settings.SENTRY_FRONTEND_SAMPLE_RATE
page_params["server_sentry_trace_rate"] = settings.SENTRY_FRONTEND_TRACE_RATE
page_params["state_data"] = state_data
if narrow_stream is not None and state_data is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = narrow_stream.recipient
state_data["max_message_id"] = -1
max_message = (
# Uses index: zerver_message_realm_recipient_id
Message.objects.filter(realm_id=realm.id, recipient=recipient)
.order_by("-id")
.only("id")
.first()
)
if max_message:
state_data["max_message_id"] = max_message.id
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic_name is not None:
page_params["narrow_topic"] = narrow_topic_name
page_params["narrow"] = [
dict(operator=term.operator, operand=term.operand) for term in narrow
]
assert isinstance(state_data["user_settings"], dict)
state_data["user_settings"]["enable_desktop_notifications"] = False
page_params["translation_data"] = get_language_translation_data(request_language)
if user_profile is None:
# Get rendered version of realm description which is displayed in right
# sidebar for spectator.
page_params["realm_rendered_description"] = get_realm_rendered_description(realm)
page_params["language_cookie_name"] = settings.LANGUAGE_COOKIE_NAME
return queue_id, page_params |
Because the URLs for uploaded files encode the realm ID of the
organization being imported (which is only determined at import
time), we need to rewrite the URLs of links to uploaded files
during the import process. | def fix_upload_links(data: TableData, message_table: TableName) -> None:
"""
Because the URLs for uploaded files encode the realm ID of the
organization being imported (which is only determined at import
time), we need to rewrite the URLs of links to uploaded files
during the import process.
"""
for message in data[message_table]:
if message["has_attachment"] is True:
for key, value in path_maps["attachment_path"].items():
if key in message["content"]:
message["content"] = message["content"].replace(key, value)
if message["rendered_content"]:
message["rendered_content"] = message["rendered_content"].replace(
key, value
) |
When the export data doesn't contain the table `zerver_realmauditlog`,
this function creates RealmAuditLog objects for `subscription_created`
type events for all the existing Stream subscriptions.
This is needed for all the export tools which do not include the
table `zerver_realmauditlog` (Slack, Gitter, etc.) because the appropriate
data about when a user was subscribed is not exported by the third-party
service. | def create_subscription_events(data: TableData, realm_id: int) -> None:
"""
When the export data doesn't contain the table `zerver_realmauditlog`,
this function creates RealmAuditLog objects for `subscription_created`
type events for all the existing Stream subscriptions.
This is needed for all the export tools which do not include the
table `zerver_realmauditlog` (Slack, Gitter, etc.) because the appropriate
data about when a user was subscribed is not exported by the third-party
service.
"""
all_subscription_logs = []
event_last_message_id = get_last_message_id()
event_time = timezone_now()
recipient_id_to_stream_id = {
d["id"]: d["type_id"] for d in data["zerver_recipient"] if d["type"] == Recipient.STREAM
}
for sub in data["zerver_subscription"]:
recipient_id = sub["recipient_id"]
stream_id = recipient_id_to_stream_id.get(recipient_id)
if stream_id is None:
continue
user_id = sub["user_profile_id"]
all_subscription_logs.append(
RealmAuditLog(
realm_id=realm_id,
acting_user_id=user_id,
modified_user_id=user_id,
modified_stream_id=stream_id,
event_last_message_id=event_last_message_id,
event_time=event_time,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
)
)
RealmAuditLog.objects.bulk_create(all_subscription_logs) |
The tokens in the services are created by 'generate_api_key'.
Since the tokens must be unique, they are re-created during import. | def fix_service_tokens(data: TableData, table: TableName) -> None:
"""
The tokens in the services are created by 'generate_api_key'.
Since the tokens must be unique, they are re-created during import.
"""
for item in data[table]:
item["token"] = generate_api_key() |
Build new huddle hashes with the updated ids of the users | def process_huddle_hash(data: TableData, table: TableName) -> None:
"""
Build new huddle hashes with the updated ids of the users
"""
for huddle in data[table]:
user_id_list = id_map_to_list["huddle_to_user_list"][huddle["id"]]
huddle["huddle_hash"] = get_huddle_hash(user_id_list) |
Extract the IDs of the user_profiles involved in a huddle from the subscription object.
This helps to generate a unique huddle hash from the updated user_profile ids. | def get_huddles_from_subscription(data: TableData, table: TableName) -> None:
"""
Extract the IDs of the user_profiles involved in a huddle from the subscription object.
This helps to generate a unique huddle hash from the updated user_profile ids.
"""
id_map_to_list["huddle_to_user_list"] = {
value: [] for value in ID_MAP["recipient_to_huddle_map"].values()
}
for subscription in data[table]:
if subscription["recipient"] in ID_MAP["recipient_to_huddle_map"]:
huddle_id = ID_MAP["recipient_to_huddle_map"][subscription["recipient"]]
id_map_to_list["huddle_to_user_list"][huddle_id].append(subscription["user_profile_id"]) |
In CustomProfileFields with 'field_type' USER, the stored user IDs need to be
re-mapped. | def fix_customprofilefield(data: TableData) -> None:
"""
In CustomProfileFields with 'field_type' USER, the stored user IDs need to be
re-mapped.
"""
field_type_USER_ids = {
item["id"]
for item in data["zerver_customprofilefield"]
if item["field_type"] == CustomProfileField.USER
}
for item in data["zerver_customprofilefieldvalue"]:
if item["field_id"] in field_type_USER_ids:
old_user_id_list = orjson.loads(item["value"])
new_id_list = re_map_foreign_keys_many_to_many_internal(
table="zerver_customprofilefieldvalue",
field_name="value",
related_table="user_profile",
old_id_list=old_user_id_list,
)
item["value"] = orjson.dumps(new_id_list).decode() |
This function sets the rendered_content of all the messages
after the messages have been imported from a non-Zulip platform. | def fix_message_rendered_content(
realm: Realm, sender_map: Dict[int, Record], messages: List[Record]
) -> None:
"""
This function sets the rendered_content of all the messages
after the messages have been imported from a non-Zulip platform.
"""
for message in messages:
if message["rendered_content"] is not None:
# For Zulip->Zulip imports, we use the original rendered
# Markdown; this avoids issues where e.g. a mention can no
# longer render properly because a user has changed their
# name.
#
# However, we still need to update the data-user-id and
# similar values stored on mentions, stream mentions, and
# similar syntax in the rendered HTML.
soup = BeautifulSoup(message["rendered_content"], "html.parser")
user_mentions = soup.findAll("span", {"class": "user-mention"})
if len(user_mentions) != 0:
user_id_map = ID_MAP["user_profile"]
for mention in user_mentions:
if not mention.has_attr("data-user-id"):
# Legacy mentions don't have a data-user-id
# field; we should just import them
# unmodified.
continue
if mention["data-user-id"] == "*":
# No rewriting is required for wildcard mentions
continue
old_user_id = int(mention["data-user-id"])
if old_user_id in user_id_map:
mention["data-user-id"] = str(user_id_map[old_user_id])
message["rendered_content"] = str(soup)
stream_mentions = soup.findAll("a", {"class": "stream"})
if len(stream_mentions) != 0:
stream_id_map = ID_MAP["stream"]
for mention in stream_mentions:
old_stream_id = int(mention["data-stream-id"])
if old_stream_id in stream_id_map:
mention["data-stream-id"] = str(stream_id_map[old_stream_id])
message["rendered_content"] = str(soup)
user_group_mentions = soup.findAll("span", {"class": "user-group-mention"})
if len(user_group_mentions) != 0:
user_group_id_map = ID_MAP["usergroup"]
for mention in user_group_mentions:
old_user_group_id = int(mention["data-user-group-id"])
if old_user_group_id in user_group_id_map:
mention["data-user-group-id"] = str(user_group_id_map[old_user_group_id])
message["rendered_content"] = str(soup)
continue
try:
content = message["content"]
sender_id = message["sender_id"]
sender = sender_map[sender_id]
sent_by_bot = sender["is_bot"]
translate_emoticons = sender["translate_emoticons"]
# We don't handle alert words on import from third-party
# platforms, since they generally don't have an "alert
# words" type feature, and notifications aren't important anyway.
realm_alert_words_automaton = None
rendered_content = markdown_convert(
content=content,
realm_alert_words_automaton=realm_alert_words_automaton,
message_realm=realm,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
).rendered_content
message["rendered_content"] = rendered_content
if "scheduled_timestamp" not in message:
# This logic runs also for ScheduledMessage, which doesn't use
# the rendered_content_version field.
message["rendered_content_version"] = markdown_version
except Exception:
# This generally happens with two possible causes:
# * rendering Markdown throwing an uncaught exception
# * rendering Markdown failing with the exception being
# caught in Markdown (which then returns None, causing the
# rendered_content assert above to fire).
logging.warning(
"Error in Markdown rendering for message ID %s; continuing", message["id"]
) |
Returns the ids present in the current table | def current_table_ids(data: TableData, table: TableName) -> List[int]:
"""
Returns the ids present in the current table
"""
return [item["id"] for item in data[table]] |
Increases the sequence number for a given table by the number of objects being
imported into that table. Hence, this gives a reserved range of IDs to import the
converted Slack objects into the tables. | def allocate_ids(model_class: Any, count: int) -> List[int]:
"""
Increases the sequence number for a given table by the number of objects being
imported into that table. Hence, this gives a reserved range of IDs to import the
converted Slack objects into the tables.
"""
conn = connection.cursor()
sequence = idseq(model_class)
conn.execute("select nextval(%s) from generate_series(1, %s)", [sequence, count])
query = conn.fetchall() # Each element in the result is a tuple like (5,)
conn.close()
# convert List[Tuple[int]] to List[int]
return [item[0] for item in query] |
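The reserved range comes from advancing the model's PostgreSQL id sequence once per requested id (`SELECT nextval(...) FROM generate_series(1, count)`). A rough stand-alone sketch of that contract, with the database sequence simulated by an in-memory counter so the non-overlapping blocks are easy to see:

```python
from itertools import count
from typing import Iterator, List

# Simulated id sequence; in production this state lives in PostgreSQL.
_sequence: Iterator[int] = count(start=1)

def allocate_ids_simulated(n: int) -> List[int]:
    """Reserve the next n ids so imported rows cannot collide with existing rows."""
    return [next(_sequence) for _ in range(n)]

streams_block = allocate_ids_simulated(3)   # [1, 2, 3]
messages_block = allocate_ids_simulated(2)  # [4, 5] -- never overlaps the first block
print(streams_block, messages_block)
```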
When Django gives us dict objects via model_to_dict, the foreign
key fields are `foo`, but we want `foo_id` for the bulk insert.
This function handles the simple case where we simply rename
the fields. For cases where we need to munge ids in the
database, see re_map_foreign_keys. | def convert_to_id_fields(data: TableData, table: TableName, field_name: Field) -> None:
"""
When Django gives us dict objects via model_to_dict, the foreign
key fields are `foo`, but we want `foo_id` for the bulk insert.
This function handles the simple case where we simply rename
the fields. For cases where we need to munge ids in the
database, see re_map_foreign_keys.
"""
for item in data[table]:
item[field_name + "_id"] = item[field_name]
del item[field_name] |
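Since the transformation is a plain in-place dict rename, a tiny example with made-up row data shows the before/after shape:

```python
from typing import Any, Dict, List

def convert_to_id_fields_example(rows: List[Dict[str, Any]], field_name: str) -> None:
    """Rename a foreign-key field `foo` (holding an id) to `foo_id`, in place."""
    for item in rows:
        item[field_name + "_id"] = item[field_name]
        del item[field_name]

rows = [{"id": 10, "realm": 7}, {"id": 11, "realm": 7}]
convert_to_id_fields_example(rows, "realm")
print(rows)  # [{'id': 10, 'realm_id': 7}, {'id': 11, 'realm_id': 7}]
```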
This is a wrapper function for all the realm data tables; only avatar and attachment
records need to be passed through the internal function directly, because their data
format differs (TableData for realm data tables vs. List[Record] for
the avatar and attachment records). | def re_map_foreign_keys(
data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool = False,
id_field: bool = False,
recipient_field: bool = False,
) -> None:
"""
This is a wrapper function for all the realm data tables; only avatar and attachment
records need to be passed through the internal function directly, because their data
format differs (TableData for realm data tables vs. List[Record] for
the avatar and attachment records).
"""
# See comments in bulk_import_user_message_data.
assert "usermessage" not in related_table
re_map_foreign_keys_internal(
data[table],
table,
field_name,
related_table,
verbose,
id_field,
recipient_field,
) |
We occasionally need to assign new ids to rows during the
import/export process, to accommodate things like existing rows
already being in tables. See bulk_import_client for more context.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this fixer function does
the re-mapping. (It also appends `_id` to the field.) | def re_map_foreign_keys_internal(
data_table: List[Record],
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool = False,
id_field: bool = False,
recipient_field: bool = False,
) -> None:
"""
We occasionally need to assign new ids to rows during the
import/export process, to accommodate things like existing rows
already being in tables. See bulk_import_client for more context.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this fixer function does
the re-mapping. (It also appends `_id` to the field.)
"""
lookup_table = ID_MAP[related_table]
for item in data_table:
old_id = item[field_name]
if recipient_field:
if related_table == "stream" and item["type"] == 2:
pass
elif related_table == "user_profile" and item["type"] == 1:
pass
elif related_table == "huddle" and item["type"] == 3:
# save the recipient id with the huddle id, so that we can extract
# the user_profile ids involved in a huddle with the help of the
# subscription object
# check function 'get_huddles_from_subscription'
ID_MAP["recipient_to_huddle_map"][item["id"]] = lookup_table[old_id]
else:
continue
old_id = item[field_name]
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info(
"Remapping %s %s from %s to %s", table, field_name + "_id", old_id, new_id
)
else:
new_id = old_id
if not id_field:
item[field_name + "_id"] = new_id
del item[field_name]
else:
item[field_name] = new_id |
Some tables, including Reaction and UserStatus, contain a form of
foreign key reference to the RealmEmoji table in the form of
`str(realm_emoji.id)` when `reaction_type="realm_emoji"`.
See the block comment for emoji_code in the AbstractEmoji
definition for more details. | def re_map_realm_emoji_codes(data: TableData, *, table_name: str) -> None:
"""
Some tables, including Reaction and UserStatus, contain a form of
foreign key reference to the RealmEmoji table in the form of
`str(realm_emoji.id)` when `reaction_type="realm_emoji"`.
See the block comment for emoji_code in the AbstractEmoji
definition for more details.
"""
realm_emoji_dct = {}
for row in data["zerver_realmemoji"]:
realm_emoji_dct[row["id"]] = row
for row in data[table_name]:
if row["reaction_type"] == Reaction.REALM_EMOJI:
old_realm_emoji_id = int(row["emoji_code"])
# Fail hard here if we didn't map the realm emoji id correctly.
new_realm_emoji_id = ID_MAP["realmemoji"][old_realm_emoji_id]
# This is a very important sanity check.
realm_emoji_row = realm_emoji_dct[new_realm_emoji_id]
assert realm_emoji_row["name"] == row["emoji_name"]
# Now update emoji_code to the new id.
row["emoji_code"] = str(new_realm_emoji_id) |
We need to assign new ids to rows during the import/export
process.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this wrapper function does
the re-mapping only for ManyToMany fields. | def re_map_foreign_keys_many_to_many(
data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool = False,
) -> None:
"""
We need to assign new ids to rows during the import/export
process.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this wrapper function does
the re-mapping only for ManyToMany fields.
"""
for item in data[table]:
old_id_list = item[field_name]
new_id_list = re_map_foreign_keys_many_to_many_internal(
table, field_name, related_table, old_id_list, verbose
)
item[field_name] = new_id_list
del item[field_name] |
This is an internal function for tables with ManyToMany fields,
which takes the old ID list of the ManyToMany relation and returns the
new updated ID list. | def re_map_foreign_keys_many_to_many_internal(
table: TableName,
field_name: Field,
related_table: TableName,
old_id_list: List[int],
verbose: bool = False,
) -> List[int]:
"""
This is an internal function for tables with ManyToMany fields,
which takes the old ID list of the ManyToMany relation and returns the
new updated ID list.
"""
lookup_table = ID_MAP[related_table]
new_id_list = []
for old_id in old_id_list:
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info(
"Remapping %s %s from %s to %s", table, field_name + "_id", old_id, new_id
)
else:
new_id = old_id
new_id_list.append(new_id)
return new_id_list |
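Stripped of the surrounding import machinery, the remapping is a per-element dictionary lookup that falls back to the old id when no mapping exists. A small stand-alone illustration with invented ids:

```python
from typing import Dict, List

def remap_id_list(old_ids: List[int], lookup_table: Dict[int, int]) -> List[int]:
    """Map each old id to its new id, keeping ids that have no remapping."""
    return [lookup_table.get(old_id, old_id) for old_id in old_ids]

lookup = {1: 101, 2: 102}                 # old user id -> newly allocated id
print(remap_id_list([1, 2, 3], lookup))   # [101, 102, 3] -- 3 had no remapping
```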
The recipient column shouldn't be imported; we'll set the correct values
when the Recipient table gets imported. | def remove_denormalized_recipient_column_from_data(data: TableData) -> None:
"""
The recipient column shouldn't be imported; we'll set the correct values
when the Recipient table gets imported.
"""
for stream_dict in data["zerver_stream"]:
if "recipient" in stream_dict:
del stream_dict["recipient"]
for user_profile_dict in data["zerver_userprofile"]:
if "recipient" in user_profile_dict:
del user_profile_dict["recipient"]
for huddle_dict in data["zerver_huddle"]:
if "recipient" in huddle_dict:
del huddle_dict["recipient"] |
E.g. (RealmDomain -> 'zerver_realmdomain') | def get_db_table(model_class: Any) -> str:
"""E.g. (RealmDomain -> 'zerver_realmdomain')"""
return model_class._meta.db_table |
Should run only with settings.BILLING_ENABLED. Ensures that we only
enable authentication methods that are available without needing a plan.
If the organization upgrades to a paid plan, or gets a sponsorship,
they can enable the restricted authentication methods in their settings. | def disable_restricted_authentication_methods(data: TableData) -> None:
"""
Should run only with settings.BILLING_ENABLED. Ensures that we only
enable authentication methods that are available without needing a plan.
If the organization upgrades to a paid plan, or gets a sponsorship,
they can enable the restricted authentication methods in their settings.
"""
realm_authentication_methods = data["zerver_realmauthenticationmethod"]
non_restricted_methods = []
for auth_method in realm_authentication_methods:
if AUTH_BACKEND_NAME_MAP[auth_method["name"]].available_for_cloud_plans is None:
non_restricted_methods.append(auth_method)
else:
logging.warning("Dropped restricted authentication method: %s", auth_method["name"])
data["zerver_realmauthenticationmethod"] = non_restricted_methods |
This function reads in our entire collection of message
ids, which can be millions of integers for some installations.
And then we sort the list. This is necessary to ensure
that the sort order of incoming ids matches the sort order
of date_sent, which isn't always guaranteed by our
utilities that convert third party chat data. We also
need to move our ids to a new range if we're dealing
with a server that has data for other realms. | def get_incoming_message_ids(import_dir: Path, sort_by_date: bool) -> List[int]:
"""
This function reads in our entire collection of message
ids, which can be millions of integers for some installations.
And then we sort the list. This is necessary to ensure
that the sort order of incoming ids matches the sort order
of date_sent, which isn't always guaranteed by our
utilities that convert third party chat data. We also
need to move our ids to a new range if we're dealing
with a server that has data for other realms.
"""
if sort_by_date:
tups: List[Tuple[int, int]] = []
else:
message_ids: List[int] = []
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
if not os.path.exists(message_filename):
break
with open(message_filename, "rb") as f:
data = orjson.loads(f.read())
# Aggressively free up memory.
del data["zerver_usermessage"]
for row in data["zerver_message"]:
# We truncate date_sent to int to theoretically
# save memory and speed up the sort. For
# Zulip-to-Zulip imports, the
# message_id will generally be a good tiebreaker.
# If we occasionally misorder the ids for two
# messages from the same second, it's not the
# end of the world, as it's likely those messages
# arrived to the original server in somewhat
# arbitrary order.
message_id = row["id"]
if sort_by_date:
date_sent = int(row["date_sent"])
tup = (date_sent, message_id)
tups.append(tup)
else:
message_ids.append(message_id)
dump_file_id += 1
if sort_by_date:
tups.sort()
message_ids = [tup[1] for tup in tups]
return message_ids |
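The sort_by_date path relies on Python's tuple ordering: sorting (date_sent, message_id) pairs orders messages chronologically and uses the id as a tiebreaker within the same second. A tiny example with invented values:

```python
# (date_sent, message_id) tuples; two messages share the same timestamp.
tups = [(1700000002, 12), (1700000001, 15), (1700000002, 11)]
tups.sort()
message_ids = [message_id for _, message_id in tups]
print(message_ids)  # [15, 11, 12] -- chronological, id breaks the tie
```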
Given an email address, returns the initial password for that account, as
created by populate_db. | def initial_password(email: str) -> Optional[str]:
"""Given an email address, returns the initial password for that account, as
created by populate_db."""
if settings.INITIAL_PASSWORD_SALT is not None:
# We check settings.DEVELOPMENT, not settings.PRODUCTION,
# because some tests mock settings.PRODUCTION and then use
# self.login, which will call this function.
assert settings.DEVELOPMENT, "initial_password_salt should not be set in production."
encoded_key = (settings.INITIAL_PASSWORD_SALT + email).encode()
digest = hashlib.sha256(encoded_key).digest()
return base64.b64encode(digest)[:16].decode()
else:
# None as a password for a user tells Django to set an unusable password
return None |
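The derivation is just SHA-256 over salt-plus-email, base64-encoded and truncated to 16 characters. A self-contained sketch with a made-up salt (the real salt comes from settings.INITIAL_PASSWORD_SALT):

```python
import base64
import hashlib

def initial_password_example(email: str, salt: str) -> str:
    """Derive a deterministic 16-character development password from a salt and email."""
    digest = hashlib.sha256((salt + email).encode()).digest()
    return base64.b64encode(digest)[:16].decode()

print(initial_password_example("iago@zulip.com", "not-a-real-salt"))
```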
Find the module name corresponding to where this record was logged.
Sadly `record.module` is just the innermost component of the full
module name, so we have to go reconstruct this ourselves. | def find_log_caller_module(record: logging.LogRecord) -> Optional[str]:
"""Find the module name corresponding to where this record was logged.
Sadly `record.module` is just the innermost component of the full
module name, so we have to go reconstruct this ourselves.
"""
# Repeat a search similar to that in logging.Logger.findCaller.
# The logging call should still be on the stack somewhere; search until
# we find something in the same source file, and that should give the
# right module name.
f = logging.currentframe()
while True:
if f.f_code.co_filename == record.pathname:
return f.f_globals.get("__name__")
if f.f_back is None:
return None
f = f.f_back |
Note: `filename` should be declared in zproject/computed_settings.py with zulip_path. | def log_to_file(
logger: Logger,
filename: str,
log_format: str = "%(asctime)s %(levelname)-8s %(message)s",
) -> None:
"""Note: `filename` should be declared in zproject/computed_settings.py with zulip_path."""
formatter = logging.Formatter(log_format)
handler = logging.FileHandler(filename)
handler.setFormatter(formatter)
logger.addHandler(handler) |
You can access a message by ID in our APIs if either:
(1) you received it or have previously accessed it via starring
(aka you have a UserMessage row for it), or
(2) it was sent to a public stream in your realm.
We produce consistent, boring error messages to avoid leaking any
information from a security perspective.
The lock_message parameter should be passed by callers that are
planning to modify the Message object. This will use the SQL
`SELECT FOR UPDATE` feature to ensure that other processes cannot
delete the message during the current transaction, which is
important to prevent rare race conditions. Callers must only
pass lock_message when inside a @transaction.atomic block. | def access_message(
user_profile: UserProfile,
message_id: int,
lock_message: bool = False,
) -> Message:
"""You can access a message by ID in our APIs that either:
(1) You received or have previously accessed via starring
(aka have a UserMessage row for).
(2) Was sent to a public stream in your realm.
We produce consistent, boring error messages to avoid leaking any
information from a security perspective.
The lock_message parameter should be passed by callers that are
planning to modify the Message object. This will use the SQL
`SELECT FOR UPDATE` feature to ensure that other processes cannot
delete the message during the current transaction, which is
important to prevent rare race conditions. Callers must only
pass lock_message when inside a @transaction.atomic block.
"""
try:
base_query = Message.objects.select_related(*Message.DEFAULT_SELECT_RELATED)
if lock_message:
# We want to lock only the `Message` row, and not the related fields
# because the `Message` row only has a possibility of races.
base_query = base_query.select_for_update(of=("self",))
message = base_query.get(id=message_id)
except Message.DoesNotExist:
raise JsonableError(_("Invalid message(s)"))
has_user_message = lambda: UserMessage.objects.filter(
user_profile=user_profile, message_id=message_id
).exists()
if has_message_access(user_profile, message, has_user_message=has_user_message):
return message
raise JsonableError(_("Invalid message(s)")) |
As access_message, but also returns the usermessage, if any. | def access_message_and_usermessage(
user_profile: UserProfile,
message_id: int,
lock_message: bool = False,
) -> Tuple[Message, Optional[UserMessage]]:
"""As access_message, but also returns the usermessage, if any."""
try:
base_query = Message.objects.select_related(*Message.DEFAULT_SELECT_RELATED)
if lock_message:
# We want to lock only the `Message` row, and not the related fields
# because the `Message` row only has a possibility of races.
base_query = base_query.select_for_update(of=("self",))
message = base_query.get(id=message_id)
except Message.DoesNotExist:
raise JsonableError(_("Invalid message(s)"))
user_message = get_usermessage_by_message_id(user_profile, message_id)
has_user_message = lambda: user_message is not None
if has_message_access(user_profile, message, has_user_message=has_user_message):
return (message, user_message)
raise JsonableError(_("Invalid message(s)")) |
Access control method for unauthenticated requests interacting
with a message in web-public streams. | def access_web_public_message(
realm: Realm,
message_id: int,
) -> Message:
"""Access control method for unauthenticated requests interacting
with a message in web-public streams.
"""
# We throw a MissingAuthenticationError for all errors in this
# code path, to avoid potentially leaking information on whether a
# message with the provided ID exists on the server if the client
# shouldn't have access to it.
if not realm.web_public_streams_enabled():
raise MissingAuthenticationError
try:
message = Message.objects.select_related(*Message.DEFAULT_SELECT_RELATED).get(id=message_id)
except Message.DoesNotExist:
raise MissingAuthenticationError
if not message.is_stream_message():
raise MissingAuthenticationError
queryset = get_web_public_streams_queryset(realm)
try:
stream = queryset.get(id=message.recipient.type_id)
except Stream.DoesNotExist:
raise MissingAuthenticationError
# These should all have been enforced by the code in
# get_web_public_streams_queryset
assert stream.is_web_public
assert not stream.deactivated
assert not stream.invite_only
assert stream.history_public_to_subscribers
# Now that we've confirmed this message was sent to the target
# web-public stream, we can return it as having been successfully
# accessed.
return message |
Returns whether a user has access to a given message.
* The has_user_message callable reports whether the user has a UserMessage
row for the target message.
* The optional stream parameter is validated; is_subscribed is not. | def has_message_access(
user_profile: UserProfile,
message: Message,
*,
has_user_message: Callable[[], bool],
stream: Optional[Stream] = None,
is_subscribed: Optional[bool] = None,
) -> bool:
"""
Returns whether a user has access to a given message.
* The has_user_message callable reports whether the user has a UserMessage
row for the target message.
* The optional stream parameter is validated; is_subscribed is not.
"""
if message.recipient.type != Recipient.STREAM:
# You can only access direct messages you received
return has_user_message()
if stream is None:
stream = Stream.objects.get(id=message.recipient.type_id)
else:
assert stream.recipient_id == message.recipient_id
if stream.realm_id != user_profile.realm_id:
# You can't access public stream messages in other realms
return False
def is_subscribed_helper() -> bool:
if is_subscribed is not None:
return is_subscribed
return Subscription.objects.filter(
user_profile=user_profile, active=True, recipient=message.recipient
).exists()
if stream.is_public() and user_profile.can_access_public_streams():
return True
if not stream.is_history_public_to_subscribers():
# Unless history is public to subscribers, you need to both:
# (1) Have directly received the message.
# AND
# (2) Be subscribed to the stream.
return has_user_message() and is_subscribed_helper()
# is_history_public_to_subscribers, so check if you're subscribed
return is_subscribed_helper() |
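For the stream-message branch, the access rules reduce to a small decision over a handful of booleans. The sketch below restates that branch as a pure function (all parameters are plain booleans standing in for the model lookups above), which can help when reasoning about edge cases:

```python
def stream_message_access(
    *,
    same_realm: bool,
    is_public: bool,
    can_access_public: bool,
    history_public_to_subscribers: bool,
    has_user_message: bool,
    is_subscribed: bool,
) -> bool:
    """Pure-boolean restatement of the stream-message branch of has_message_access."""
    if not same_realm:
        return False
    if is_public and can_access_public:
        return True
    if not history_public_to_subscribers:
        # Protected history: must have received the message AND be subscribed.
        return has_user_message and is_subscribed
    return is_subscribed

# A guest (cannot access all public streams) who is subscribed to a stream
# whose history is public to subscribers still gets access:
print(stream_message_access(
    same_realm=True, is_public=True, can_access_public=False,
    history_public_to_subscribers=True, has_user_message=False, is_subscribed=True,
))  # True
```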
This function does the full has_message_access check for each
message. If stream is provided, it is used to avoid unnecessary
database queries, and the function does exactly 2 bulk queries instead.
Throws AssertionError if stream is passed and any of the messages
were not sent to that stream. | def bulk_access_messages(
user_profile: UserProfile,
messages: Collection[Message] | QuerySet[Message],
*,
stream: Optional[Stream] = None,
) -> List[Message]:
"""This function does the full has_message_access check for each
message. If stream is provided, it is used to avoid unnecessary
database queries, and the function does exactly 2 bulk queries instead.
Throws AssertionError if stream is passed and any of the messages
were not sent to that stream.
"""
filtered_messages = []
user_message_set = set(
get_messages_with_usermessage_rows_for_user(
user_profile.id, [message.id for message in messages]
)
)
if stream is None:
streams = {
stream.recipient_id: stream
for stream in Stream.objects.filter(
id__in={
message.recipient.type_id
for message in messages
if message.recipient.type == Recipient.STREAM
}
)
}
subscribed_recipient_ids = set(get_subscribed_stream_recipient_ids_for_user(user_profile))
for message in messages:
is_subscribed = message.recipient_id in subscribed_recipient_ids
if has_message_access(
user_profile,
message,
has_user_message=partial(lambda m: m.id in user_message_set, message),
stream=streams.get(message.recipient_id) if stream is None else stream,
is_subscribed=is_subscribed,
):
filtered_messages.append(message)
return filtered_messages |
This function mirrors bulk_access_messages, above, but applies the
limits to a QuerySet and returns a new QuerySet which only
contains messages in the given stream which the user can access.
Note that this only works with streams. It may return an empty
QuerySet if the user has access to no messages (for instance, for
a private stream which the user is not subscribed to). | def bulk_access_stream_messages_query(
user_profile: UserProfile, messages: QuerySet[Message], stream: Stream
) -> QuerySet[Message]:
"""This function mirrors bulk_access_messages, above, but applies the
limits to a QuerySet and returns a new QuerySet which only
contains messages in the given stream which the user can access.
Note that this only works with streams. It may return an empty
QuerySet if the user has access to no messages (for instance, for
a private stream which the user is not subscribed to).
"""
messages = messages.filter(realm_id=user_profile.realm_id, recipient_id=stream.recipient_id)
if stream.is_public() and user_profile.can_access_public_streams():
return messages
if not Subscription.objects.filter(
user_profile=user_profile, active=True, recipient=stream.recipient
).exists():
return Message.objects.none()
if not stream.is_history_public_to_subscribers():
messages = messages.annotate(
has_usermessage=Exists(
UserMessage.objects.filter(
user_profile_id=user_profile.id, message_id=OuterRef("id")
)
)
).filter(has_usermessage=1)
return messages |
Returns a subset of `message_ids` containing only messages the
user has a UserMessage for. Makes O(1) database queries.
Note that this is not sufficient for access verification for
stream messages.
See `access_message`, `bulk_access_messages` for proper message access
checks that follow our security model. | def get_messages_with_usermessage_rows_for_user(
user_profile_id: int, message_ids: Sequence[int]
) -> ValuesQuerySet[UserMessage, int]:
"""
Returns a subset of `message_ids` containing only messages the
user has a UserMessage for. Makes O(1) database queries.
Note that this is not sufficient for access verification for
stream messages.
See `access_message`, `bulk_access_messages` for proper message access
checks that follow our security model.
"""
return UserMessage.objects.filter(
user_profile_id=user_profile_id,
message_id__in=message_ids,
).values_list("message_id", flat=True) |
Helper for doing lookups of the recipient_id that
get_recent_private_conversations would have used to record that
message in its data structure. | def get_recent_conversations_recipient_id(
user_profile: UserProfile, recipient_id: int, sender_id: int
) -> int:
"""Helper for doing lookups of the recipient_id that
get_recent_private_conversations would have used to record that
message in its data structure.
"""
my_recipient_id = user_profile.recipient_id
if recipient_id == my_recipient_id:
return UserProfile.objects.values_list("recipient_id", flat=True).get(id=sender_id)
return recipient_id |
This function uses some carefully optimized SQL queries, designed
to use the UserMessage index on private_messages. It is
somewhat complicated by the fact that for 1:1 direct
messages, we store the message against a recipient_id of whichever
user was the recipient, and thus for 1:1 direct messages sent
directly to us, we need to look up the other user from the
sender_id on those messages. You'll see that pattern repeated
both here and also in zerver/lib/events.py.
It may be possible to write this query directly in Django; however,
it is made much easier by using CTEs, which Django does not
natively support.
We return a dictionary structure for convenient modification
below; this structure is converted into its final form by
post_process. | def get_recent_private_conversations(user_profile: UserProfile) -> Dict[int, Dict[str, Any]]:
"""This function uses some carefully optimized SQL queries, designed
to use the UserMessage index on private_messages. It is
somewhat complicated by the fact that for 1:1 direct
messages, we store the message against a recipient_id of whichever
user was the recipient, and thus for 1:1 direct messages sent
directly to us, we need to look up the other user from the
sender_id on those messages. You'll see that pattern repeated
both here and also in zerver/lib/events.py.
It may be possible to write this query directly in Django; however,
it is made much easier by using CTEs, which Django does not
natively support.
We return a dictionary structure for convenient modification
below; this structure is converted into its final form by
post_process.
"""
RECENT_CONVERSATIONS_LIMIT = 1000
recipient_map = {}
my_recipient_id = user_profile.recipient_id
query = SQL(
"""
WITH personals AS (
SELECT um.message_id AS message_id
FROM zerver_usermessage um
WHERE um.user_profile_id = %(user_profile_id)s
AND um.flags & 2048 <> 0
ORDER BY message_id DESC limit %(conversation_limit)s
),
message AS (
SELECT message_id,
CASE
WHEN m.recipient_id = %(my_recipient_id)s
THEN m.sender_id
ELSE NULL
END AS sender_id,
CASE
WHEN m.recipient_id <> %(my_recipient_id)s
THEN m.recipient_id
ELSE NULL
END AS outgoing_recipient_id
FROM personals
JOIN zerver_message m
ON personals.message_id = m.id
),
unified AS (
SELECT message_id,
COALESCE(zerver_userprofile.recipient_id, outgoing_recipient_id) AS other_recipient_id
FROM message
LEFT JOIN zerver_userprofile
ON zerver_userprofile.id = sender_id
)
SELECT other_recipient_id,
MAX(message_id)
FROM unified
GROUP BY other_recipient_id
"""
)
with connection.cursor() as cursor:
cursor.execute(
query,
{
"user_profile_id": user_profile.id,
"conversation_limit": RECENT_CONVERSATIONS_LIMIT,
"my_recipient_id": my_recipient_id,
},
)
rows = cursor.fetchall()
# The resulting rows will be (recipient_id, max_message_id)
# objects for all parties we've had recent (group?) private
# message conversations with, including direct messages with
# yourself (those will generate an empty list of user_ids).
for recipient_id, max_message_id in rows:
recipient_map[recipient_id] = dict(
max_message_id=max_message_id,
user_ids=[],
)
# Now we need to map all the recipient_id objects to lists of user IDs
for recipient_id, user_profile_id in (
Subscription.objects.filter(recipient_id__in=recipient_map.keys())
.exclude(user_profile_id=user_profile.id)
.values_list("recipient_id", "user_profile_id")
):
recipient_map[recipient_id]["user_ids"].append(user_profile_id)
# Sort to prevent test flakes and client bugs.
for rec in recipient_map.values():
rec["user_ids"].sort()
return recipient_map |
Helper function for 'topic_wildcard_mention_allowed' and
'stream_wildcard_mention_allowed' to check if the sender is allowed to use
wildcard mentions based on the 'wildcard_mention_policy' setting of that realm.
This check is used only if the participants count in the topic or the subscribers
count in the stream is greater than 'Realm.WILDCARD_MENTION_THRESHOLD'. | def wildcard_mention_policy_authorizes_user(sender: UserProfile, realm: Realm) -> bool:
"""Helper function for 'topic_wildcard_mention_allowed' and
'stream_wildcard_mention_allowed' to check if the sender is allowed to use
wildcard mentions based on the 'wildcard_mention_policy' setting of that realm.
This check is used only if the participants count in the topic or the subscribers
count in the stream is greater than 'Realm.WILDCARD_MENTION_THRESHOLD'.
"""
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_NOBODY:
return False
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_EVERYONE:
return True
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_ADMINS:
return sender.is_realm_admin
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_MODERATORS:
return sender.is_realm_admin or sender.is_moderator
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_FULL_MEMBERS:
return sender.is_realm_admin or (not sender.is_provisional_member and not sender.is_guest)
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_MEMBERS:
return not sender.is_guest
raise AssertionError("Invalid wildcard mention policy") |
This function determines the visibility policy to set when a user
participates in a topic, depending on the 'automatically_follow_topics_policy'
and 'automatically_unmute_topics_in_muted_streams_policy' settings. | def visibility_policy_for_participation(
sender: UserProfile,
is_stream_muted: Optional[bool],
) -> Optional[int]:
"""
This function determines the visibility policy to set when a user
participates in a topic, depending on the 'automatically_follow_topics_policy'
and 'automatically_unmute_topics_in_muted_streams_policy' settings.
"""
if (
sender.automatically_follow_topics_policy
== UserProfile.AUTOMATICALLY_CHANGE_VISIBILITY_POLICY_ON_PARTICIPATION
):
return UserTopic.VisibilityPolicy.FOLLOWED
if (
is_stream_muted
and sender.automatically_unmute_topics_in_muted_streams_policy
== UserProfile.AUTOMATICALLY_CHANGE_VISIBILITY_POLICY_ON_PARTICIPATION
):
return UserTopic.VisibilityPolicy.UNMUTED
return None |
This function determines the visibility policy to set when a message
is sent to a topic, depending on the 'automatically_follow_topics_policy'
and 'automatically_unmute_topics_in_muted_streams_policy' settings.
It returns None when the policies can't make it more visible than the
current visibility policy. | def visibility_policy_for_send_message(
sender: UserProfile,
message: Message,
stream: Stream,
is_stream_muted: Optional[bool],
current_visibility_policy: int,
) -> Optional[int]:
"""
This function determines the visibility policy to set when a message
is sent to a topic, depending on the 'automatically_follow_topics_policy'
and 'automatically_unmute_topics_in_muted_streams_policy' settings.
It returns None when the policies can't make it more visible than the
current visibility policy.
"""
# We prioritize 'FOLLOW' over 'UNMUTE' in muted streams.
# We need to carefully handle the following two cases:
#
# 1. When an action qualifies for multiple values. Example:
# - starting a topic is INITIATION, PARTICIPATION as well as SEND
# - sending a non-first message is PARTICIPATION as well as SEND
# action | 'automatically_follow_topics_policy' | 'automatically_unmute_topics_in_muted_streams_policy' | visibility_policy
# start | ON_PARTICIPATION / ON_SEND | ON_INITIATION | FOLLOWED
# send | ON_SEND / ON_PARTICIPATION | ON_PARTICIPATION / ON_SEND | FOLLOWED
#
# 2. When both the policies have the same values.
# action | 'automatically_follow_topics_policy' | 'automatically_unmute_topics_in_muted_streams_policy' | visibility_policy
# start | ON_INITIATION | ON_INITIATION | FOLLOWED
# partc | ON_PARTICIPATION | ON_PARTICIPATION | FOLLOWED
# send | ON_SEND | ON_SEND | FOLLOWED
visibility_policy = None
if current_visibility_policy == UserTopic.VisibilityPolicy.FOLLOWED:
return visibility_policy
visibility_policy_participation = visibility_policy_for_participation(sender, is_stream_muted)
visibility_policy_send = visibility_policy_for_send(sender, is_stream_muted)
if UserTopic.VisibilityPolicy.FOLLOWED in (
visibility_policy_participation,
visibility_policy_send,
):
return UserTopic.VisibilityPolicy.FOLLOWED
if UserTopic.VisibilityPolicy.UNMUTED in (
visibility_policy_participation,
visibility_policy_send,
):
visibility_policy = UserTopic.VisibilityPolicy.UNMUTED
# If a topic has a visibility policy set, it can't be the case
# of initiation. We return early, thus saving a DB query.
if current_visibility_policy != UserTopic.VisibilityPolicy.INHERIT:
if visibility_policy and current_visibility_policy == visibility_policy:
return None
return visibility_policy
# Now we need to check if the user initiated the topic.
old_accessible_messages_in_topic: Union[QuerySet[Message], QuerySet[UserMessage]]
if can_access_stream_history(sender, stream):
old_accessible_messages_in_topic = messages_for_topic(
realm_id=sender.realm_id,
stream_recipient_id=message.recipient_id,
topic_name=message.topic_name(),
).exclude(id=message.id)
else:
# We use the user's own message access to avoid leaking information in
# private streams with protected history.
old_accessible_messages_in_topic = UserMessage.objects.filter(
user_profile=sender,
message__recipient_id=message.recipient_id,
message__subject__iexact=message.topic_name(),
).exclude(message_id=message.id)
if (
sender.automatically_follow_topics_policy
== UserProfile.AUTOMATICALLY_CHANGE_VISIBILITY_POLICY_ON_INITIATION
and not old_accessible_messages_in_topic.exists()
):
return UserTopic.VisibilityPolicy.FOLLOWED
if (
is_stream_muted
and sender.automatically_unmute_topics_in_muted_streams_policy
== UserProfile.AUTOMATICALLY_CHANGE_VISIBILITY_POLICY_ON_INITIATION
and not old_accessible_messages_in_topic.exists()
):
visibility_policy = UserTopic.VisibilityPolicy.UNMUTED
return visibility_policy |
Whether the user can set a visibility policy. | def set_visibility_policy_possible(user_profile: UserProfile, message: Message) -> bool:
"""If the user can set a visibility policy."""
if not message.is_stream_message():
return False
if user_profile.is_bot:
return False
if user_profile.realm != message.get_realm():
return False
return True |
Given an iterable of messages and reactions, stitch the reactions
into the messages. | def sew_messages_and_reactions(
messages: List[Dict[str, Any]], reactions: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
"""Given a iterable of messages and reactions stitch reactions
into messages.
"""
# Add all messages with empty reaction item
for message in messages:
message["reactions"] = []
# Convert list of messages into dictionary to make reaction stitching easy
converted_messages = {message["id"]: message for message in messages}
for reaction in reactions:
converted_messages[reaction["message_id"]]["reactions"].append(reaction)
return list(converted_messages.values()) |
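Because the stitching is pure dict manipulation, it is easy to exercise with toy data; the ids and fields below are invented:

```python
messages = [{"id": 1, "content": "hi"}, {"id": 2, "content": "bye"}]
reactions = [{"message_id": 1, "emoji_name": "wave"}]

for message in messages:
    message["reactions"] = []
by_id = {message["id"]: message for message in messages}
for reaction in reactions:
    by_id[reaction["message_id"]]["reactions"].append(reaction)

print(list(by_id.values()))
# [{'id': 1, 'content': 'hi', 'reactions': [{'message_id': 1, 'emoji_name': 'wave'}]},
#  {'id': 2, 'content': 'bye', 'reactions': []}]
```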
Updates the message as stored in the to_dict cache (for serving
messages). | def update_message_cache(
changed_messages: Iterable[Message], realm_id: Optional[int] = None
) -> List[int]:
"""Updates the message as stored in the to_dict cache (for serving
messages)."""
items_for_remote_cache = {}
message_ids = []
changed_messages_to_dict = MessageDict.messages_to_encoded_cache(changed_messages, realm_id)
for msg_id, msg in changed_messages_to_dict.items():
message_ids.append(msg_id)
key = to_dict_cache_key_id(msg_id)
items_for_remote_cache[key] = (msg,)
cache_set_many(items_for_remote_cache)
return message_ids |
Given two hex strings of equal length, return a hex string with
the bitwise xor of the two hex strings. | def xor_hex_strings(bytes_a: str, bytes_b: str) -> str:
"""Given two hex strings of equal length, return a hex string with
the bitwise xor of the two hex strings."""
assert len(bytes_a) == len(bytes_b)
return "".join(f"{int(x, 16) ^ int(y, 16):x}" for x, y in zip(bytes_a, bytes_b)) |
Given an ascii string, encode it as a hex string | def ascii_to_hex(input_string: str) -> str:
"""Given an ascii string, encode it as a hex string"""
return input_string.encode().hex() |
Given a hex array, decode it back to a string | def hex_to_ascii(input_string: str) -> str:
"""Given a hex array, decode it back to a string"""
return bytes.fromhex(input_string).decode() |
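The two hex helpers above are inverses of each other; a short round-trip check:

```python
def ascii_to_hex(input_string: str) -> str:
    return input_string.encode().hex()

def hex_to_ascii(input_string: str) -> str:
    return bytes.fromhex(input_string).decode()

assert ascii_to_hex("zulip") == "7a756c6970"
assert hex_to_ascii("7a756c6970") == "zulip"
```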
This is kind of the inverse of `get_user_mutes` above.
While `get_user_mutes` is mainly used for event system work,
this is used in the message send codepath, to get a list
of IDs of users who have muted a particular user.
The result will also include deactivated users. | def get_muting_users(muted_user_id: int) -> Set[int]:
"""
This is kind of the inverse of `get_user_mutes` above.
While `get_user_mutes` is mainly used for event system work,
this is used in the message send codepath, to get a list
of IDs of users who have muted a particular user.
The result will also include deactivated users.
"""
rows = MutedUser.objects.filter(
muted_user_id=muted_user_id,
).values("user_profile_id")
return {row["user_profile_id"] for row in rows} |
Given the anchor and use_first_unread_anchor parameters passed by
the client, computes what anchor value the client requested,
handling backwards-compatibility and the various string-valued
fields. We encode use_first_unread_anchor as anchor=None. | def parse_anchor_value(anchor_val: Optional[str], use_first_unread_anchor: bool) -> Optional[int]:
"""Given the anchor and use_first_unread_anchor parameters passed by
the client, computes what anchor value the client requested,
handling backwards-compatibility and the various string-valued
fields. We encode use_first_unread_anchor as anchor=None.
"""
if use_first_unread_anchor:
# Backwards-compatibility: Before we added support for the
# special string-typed anchor values, clients would pass
# anchor=None and use_first_unread_anchor=True to indicate
# what is now expressed as anchor="first_unread".
return None
if anchor_val is None:
# Throw an exception if neither an anchor argument nor
# use_first_unread_anchor was specified.
raise JsonableError(_("Missing 'anchor' argument."))
if anchor_val == "oldest":
return 0
if anchor_val == "newest":
return LARGER_THAN_MAX_MESSAGE_ID
if anchor_val == "first_unread":
return None
try:
# We don't use `.isnumeric()` to support negative numbers for
# anchor. We don't recommend it in the API (if you want the
# very first message, use 0 or 1), but it used to be supported
# and was used by the web app, so we need to continue
# supporting it for backwards-compatibility
anchor = int(anchor_val)
if anchor < 0:
return 0
elif anchor > LARGER_THAN_MAX_MESSAGE_ID:
return LARGER_THAN_MAX_MESSAGE_ID
return anchor
except ValueError:
raise JsonableError(_("Invalid anchor")) |
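# Illustrative calls (values assumed) showing how anchor inputs map to results.
assert parse_anchor_value("oldest", False) == 0
assert parse_anchor_value("newest", False) == LARGER_THAN_MAX_MESSAGE_ID
assert parse_anchor_value("first_unread", False) is None
assert parse_anchor_value(None, True) is None  # legacy use_first_unread_anchor form
assert parse_anchor_value("42", False) == 42
assert parse_anchor_value("-5", False) == 0  # negative anchors clamp to 0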
This code is actually generic enough that we could move it to a
library, but our only caller for now is message search. | def limit_query_to_range(
query: Select,
num_before: int,
num_after: int,
anchor: int,
include_anchor: bool,
anchored_to_left: bool,
anchored_to_right: bool,
id_col: ColumnElement[Integer],
first_visible_message_id: int,
) -> SelectBase:
"""
This code is actually generic enough that we could move it to a
library, but our only caller for now is message search.
"""
need_before_query = (not anchored_to_left) and (num_before > 0)
need_after_query = (not anchored_to_right) and (num_after > 0)
need_both_sides = need_before_query and need_after_query
# The semantics of our flags are as follows:
#
# num_before = number of rows < anchor
# num_after = number of rows > anchor
#
# But we may also want the row where id == anchor (if it exists),
# and we don't want to union up to 3 queries. So in some cases
# we do things like `after_limit = num_after + 1` to grab the
# anchor row in the "after" query.
#
# Note that in some cases, if the anchor row isn't found, we
# actually may fetch an extra row at one of the extremes.
if need_both_sides:
before_anchor = anchor - 1
after_anchor = max(anchor, first_visible_message_id)
before_limit = num_before
after_limit = num_after + 1
elif need_before_query:
before_anchor = anchor - (not include_anchor)
before_limit = num_before
if not anchored_to_right:
before_limit += include_anchor
elif need_after_query:
after_anchor = max(anchor + (not include_anchor), first_visible_message_id)
after_limit = num_after + include_anchor
if need_before_query:
before_query = query
if not anchored_to_right:
before_query = before_query.where(id_col <= before_anchor)
before_query = before_query.order_by(id_col.desc())
before_query = before_query.limit(before_limit)
if need_after_query:
after_query = query
if not anchored_to_left:
after_query = after_query.where(id_col >= after_anchor)
after_query = after_query.order_by(id_col.asc())
after_query = after_query.limit(after_limit)
if need_both_sides:
return union_all(before_query.self_group(), after_query.self_group())
elif need_before_query:
return before_query
elif need_after_query:
return after_query
else:
# If we don't have either a before_query or after_query, it's because
# some combination of num_before/num_after/anchor are zero or
# use_first_unread_anchor logic found no unread messages.
#
# The most likely reason is somebody is doing an id search, so searching
# for something like `message_id = 42` is exactly what we want. In other
# cases, which could possibly be buggy API clients, at least we will
# return at most one row here.
return query.where(id_col == anchor) |
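# Worked example (numbers assumed) of the windowing math above for the
# need_both_sides case, where the anchor row is folded into the "after" query.
anchor, num_before, num_after, first_visible_message_id = 100, 2, 3, 1
before_anchor = anchor - 1                            # 99: "before" query takes id <= 99
after_anchor = max(anchor, first_visible_message_id)  # 100: "after" query takes id >= 100
before_limit = num_before                             # 2 rows, ordered descending
after_limit = num_after + 1                           # 4 rows, ordered ascending; the extra
# row is the anchor itself (id == 100) when it exists, and the two halves are
# combined with UNION ALL.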
This method assumes that the callers are in our event-handling codepath, and
therefore as of summer 2023, they do not yet support the "negated" flag. | def narrow_dataclasses_from_tuples(tups: Collection[Sequence[str]]) -> Collection[NarrowTerm]:
"""
This method assumes that the callers are in our event-handling codepath, and
therefore as of summer 2023, they do not yet support the "negated" flag.
"""
return [NarrowTerm(operator=tup[0], operand=tup[1]) for tup in tups] |
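# Illustrative usage (operands assumed): raw (operator, operand) tuples become
# NarrowTerm dataclasses; negated terms are unsupported here, per the note above.
terms = narrow_dataclasses_from_tuples([("stream", "design"), ("topic", "colors")])
# terms == [NarrowTerm(operator="stream", operand="design"),
#           NarrowTerm(operator="topic", operand="colors")]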
Changes to this function should come with corresponding changes to
NarrowLibraryTest. | def build_narrow_predicate(
narrow: Collection[NarrowTerm],
) -> NarrowPredicate:
"""Changes to this function should come with corresponding changes to
NarrowLibraryTest."""
check_narrow_for_events(narrow)
def narrow_predicate(*, message: Dict[str, Any], flags: List[str]) -> bool:
def satisfies_operator(*, operator: str, operand: str) -> bool:
if operator in channel_operators:
if message["type"] != "stream":
return False
if operand.lower() != message["display_recipient"].lower():
return False
elif operator == "topic":
if message["type"] != "stream":
return False
topic_name = get_topic_from_message_info(message)
if operand.lower() != topic_name.lower():
return False
elif operator == "sender":
if operand.lower() != message["sender_email"].lower():
return False
elif operator == "is" and operand in ["dm", "private"]:
# "is:private" is a legacy alias for "is:dm"
if message["type"] != "private":
return False
elif operator == "is" and operand in ["starred"]:
if operand not in flags:
return False
elif operator == "is" and operand == "unread":
if "read" in flags:
return False
elif operator == "is" and operand in ["alerted", "mentioned"]:
if "mentioned" not in flags:
return False
elif operator == "is" and operand == "resolved":
if message["type"] != "stream":
return False
topic_name = get_topic_from_message_info(message)
if not topic_name.startswith(RESOLVED_TOPIC_PREFIX):
return False
return True
for narrow_term in narrow:
# TODO: Eventually handle negated narrow terms.
if not satisfies_operator(operator=narrow_term.operator, operand=narrow_term.operand):
return False
return True
return narrow_predicate |
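# Illustrative sketch (message-dict fields assumed): a predicate built for a
# sender narrow accepts only messages from that sender, regardless of flags.
predicate = build_narrow_predicate([NarrowTerm(operator="sender", operand="iago@zulip.com")])
# predicate(message={"type": "private", "sender_email": "iago@zulip.com"}, flags=[]) -> True
# predicate(message={"type": "private", "sender_email": "othello@zulip.com"}, flags=[]) -> False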
Captures the hierarchy of notification settings, where visibility policy is considered first,
followed by stream-specific settings, and the global setting in the UserProfile is the fallback. | def user_allows_notifications_in_StreamTopic(
stream_is_muted: bool,
visibility_policy: int,
stream_specific_setting: Optional[bool],
global_setting: bool,
) -> bool:
"""
Captures the hierarchy of notification settings, where visibility policy is considered first,
followed by stream-specific settings, and the global setting in the UserProfile is the fallback.
"""
if stream_is_muted and visibility_policy != UserTopic.VisibilityPolicy.UNMUTED:
return False
if visibility_policy == UserTopic.VisibilityPolicy.MUTED:
return False
if stream_specific_setting is not None:
return stream_specific_setting
return global_setting |
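# Illustrative calls (scenarios assumed): visibility policy is consulted first,
# then the stream-specific setting, then the global setting.
assert (
    user_allows_notifications_in_StreamTopic(
        stream_is_muted=True,
        visibility_policy=UserTopic.VisibilityPolicy.UNMUTED,
        stream_specific_setting=None,
        global_setting=True,
    )
    is True
)
assert (
    user_allows_notifications_in_StreamTopic(
        stream_is_muted=False,
        visibility_policy=UserTopic.VisibilityPolicy.MUTED,
        stream_specific_setting=True,
        global_setting=True,
    )
    is False
)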
Returns the user group name to display in the email notification
if user group(s) are mentioned.
This implements the same algorithm as get_user_group_mentions_data
in zerver/lib/notification_data.py, but we're passed a list of
messages instead. | def get_mentioned_user_group(
messages: List[Dict[str, Any]], user_profile: UserProfile
) -> Optional[MentionedUserGroup]:
"""Returns the user group name to display in the email notification
if user group(s) are mentioned.
This implements the same algorithm as get_user_group_mentions_data
in zerver/lib/notification_data.py, but we're passed a list of
messages instead.
"""
for message in messages:
if (
message.get("mentioned_user_group_id") is None
and message["trigger"] == NotificationTriggers.MENTION
):
# The user has also been personally mentioned, so that gets prioritized.
return None
# These IDs are those of the smallest user groups mentioned in each message.
mentioned_user_group_ids = [
message["mentioned_user_group_id"]
for message in messages
if message.get("mentioned_user_group_id") is not None
]
if len(mentioned_user_group_ids) == 0:
return None
# We now want to calculate the name of the smallest user group mentioned among
# all these messages.
smallest_user_group_size = math.inf
for user_group_id in mentioned_user_group_ids:
current_user_group = NamedUserGroup.objects.get(id=user_group_id, realm=user_profile.realm)
current_mentioned_user_group = MentionedUserGroup(
id=current_user_group.id,
name=current_user_group.name,
members_count=len(get_user_group_member_ids(current_user_group)),
)
if current_mentioned_user_group.members_count < smallest_user_group_size:
# If multiple user groups are mentioned, we prefer the
# user group with the least members.
smallest_user_group_size = current_mentioned_user_group.members_count
smallest_mentioned_user_group = current_mentioned_user_group
return smallest_mentioned_user_group |
This checks if there is any realm internal bot missing.
If that is the case, it creates the missing realm internal bots. | def create_if_missing_realm_internal_bots() -> None:
"""This checks if there is any realm internal bot missing.
If that is the case, it creates the missing realm internal bots.
"""
if missing_any_realm_internal_bots():
for realm in Realm.objects.all():
setup_realm_internal_bots(realm) |
Given the send_request object for a direct message from the user
to welcome-bot, trigger the welcome-bot reply. | def send_welcome_bot_response(send_request: SendMessageRequest) -> None:
"""Given the send_request object for a direct message from the user
to welcome-bot, trigger the welcome-bot reply."""
welcome_bot = get_system_bot(settings.WELCOME_BOT, send_request.realm.id)
human_response_lower = send_request.message.content.lower()
content = select_welcome_bot_response(human_response_lower)
internal_send_private_message(
welcome_bot,
send_request.message.sender,
content,
# Note: Welcome bot doesn't trigger email/push notifications,
# as this is intended to be seen contextually in the application.
disable_external_notifications=True,
) |
bot_id is the user_id of the bot sending the response
message_info is used to address the message and should have these fields:
type - "stream" or "private"
display_recipient - like we have in other message events
topic - see get_topic_from_message_info
response_data is what the bot wants to send back and has these fields:
content - raw Markdown content for Zulip to render
WARNING: This function sends messages bypassing the stream access check
for the bot, so use it with caution: do not call this in codepaths
that might let someone send arbitrary messages to any stream through it. | def send_response_message(
bot_id: int, message_info: Dict[str, Any], response_data: Dict[str, Any]
) -> None:
"""
bot_id is the user_id of the bot sending the response
message_info is used to address the message and should have these fields:
type - "stream" or "private"
display_recipient - like we have in other message events
topic - see get_topic_from_message_info
response_data is what the bot wants to send back and has these fields:
content - raw Markdown content for Zulip to render
WARNING: This function sends messages bypassing the stream access check
for the bot, so use it with caution: do not call this in codepaths
that might let someone send arbitrary messages to any stream through it.
"""
recipient_type_name = message_info["type"]
display_recipient = message_info["display_recipient"]
try:
topic_name: Optional[str] = get_topic_from_message_info(message_info)
except KeyError:
topic_name = None
bot_user = get_user_profile_by_id(bot_id)
realm = bot_user.realm
client = get_client("OutgoingWebhookResponse")
content = response_data.get("content")
assert content
widget_content = response_data.get("widget_content")
if recipient_type_name == "stream":
message_to = [display_recipient]
elif recipient_type_name == "private":
message_to = [recipient["email"] for recipient in display_recipient]
else:
raise JsonableError(_("Invalid message type"))
check_send_message(
sender=bot_user,
client=client,
recipient_type_name=recipient_type_name,
message_to=message_to,
topic_name=topic_name,
message_content=content,
widget_content=widget_content,
realm=realm,
skip_stream_access_check=True,
) |
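# Illustrative payloads (values and bot are hypothetical) matching the
# docstring above, for a response sent back to a stream.
example_message_info = {
    "type": "stream",
    "display_recipient": "support",
    "topic": "outage",
}
example_response_data = {"content": "Acknowledged - looking into it."}
# send_response_message(bot_id=outgoing_bot_id, message_info=example_message_info,
#                       response_data=example_response_data)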
Returns the response of the call if no exception occurs. | def do_rest_call(
base_url: str,
event: Dict[str, Any],
service_handler: OutgoingWebhookServiceInterface,
) -> Optional[Response]:
"""Returns response of call if no exception occurs."""
try:
start_time = perf_counter()
bot_profile = service_handler.user_profile
response = service_handler.make_request(
base_url,
event,
bot_profile.realm,
)
logging.info(
"Outgoing webhook request from %s@%s took %f seconds",
bot_profile.id,
bot_profile.realm.string_id,
perf_counter() - start_time,
)
if response is None:
return None
if str(response.status_code).startswith("2"):
try:
process_success_response(event, service_handler, response)
except JsonableError as e:
response_message = e.msg
logging.info("Outhook trigger failed:", stack_info=True)
fail_with_message(event, response_message)
response_message = f"The outgoing webhook server attempted to send a message in Zulip, but that request resulted in the following error:\n> {e}"
notify_bot_owner(
event, response_content=response.text, failure_message=response_message
)
return None
else:
logging.warning(
"Message %(message_url)s triggered an outgoing webhook, returning status "
'code %(status_code)s.\n Content of response (in quotes): "'
'%(response)s"',
{
"message_url": get_message_url(event),
"status_code": response.status_code,
"response": response.text,
},
)
failure_message = f"Third party responded with {response.status_code}"
fail_with_message(event, failure_message)
notify_bot_owner(event, response.status_code, response.content)
return response
except requests.exceptions.Timeout:
logging.info(
"Trigger event %s on %s timed out. Retrying",
event["command"],
event["service_name"],
)
failure_message = (
f"Request timed out after {settings.OUTGOING_WEBHOOK_TIMEOUT_SECONDS} seconds."
)
request_retry(event, failure_message=failure_message)
return None
except requests.exceptions.ConnectionError:
logging.info(
"Trigger event %s on %s resulted in a connection error. Retrying",
event["command"],
event["service_name"],
)
failure_message = "A connection error occurred. Is my bot server down?"
request_retry(event, failure_message=failure_message)
return None
except requests.exceptions.RequestException as e:
response_message = (
f"An exception of type *{type(e).__name__}* occurred for message `{event['command']}`! "
"See the Zulip server logs for more information."
)
logging.exception("Outhook trigger failed:", stack_info=True)
fail_with_message(event, response_message)
notify_bot_owner(event, exception=e)
return None |
Our data models allow UserPresence objects to have None values
for last_active_time/last_connected_time. The legacy API,
however, has always sent timestamps, so for backward
compatibility we cannot send such values through the API and need
to default to a sane value.
This helper function expects to take a last_active_time or
last_connected_time value and the date_joined of the user, which
will serve as the default value if the first argument is None. | def user_presence_datetime_with_date_joined_default(
dt: Optional[datetime], date_joined: datetime
) -> datetime:
"""
Our data models allow UserPresence objects to have None values
for last_active_time/last_connected_time. The legacy API,
however, has always sent timestamps, so for backward
compatibility we cannot send such values through the API and need
to default to a sane value.
This helper function expects to take a last_active_time or
last_connected_time value and the date_joined of the user, which
will serve as the default value if the first argument is None.
"""
if dt is None:
return date_joined
return dt |
Reformats the modern UserPresence data structure so that legacy
API clients can still access presence data.
We expect this code to remain mostly unchanged until we can delete it. | def get_legacy_user_presence_info(
last_active_time: datetime, last_connected_time: datetime
) -> Dict[str, Any]:
"""
Reformats the modern UserPresence data structure so that legacy
API clients can still access presence data.
We expect this code to remain mostly unchanged until we can delete it.
"""
# Now we put things together in the legacy presence format with
# one client + an `aggregated` field.
#
# TODO: Look at whether we can drop to just the "aggregated" field
# if no clients look at the rest.
most_recent_info = format_legacy_presence_dict(last_active_time, last_connected_time)
result = {}
# The word "aggregated" here is possibly misleading.
# It's really just the most recent client's info.
result["aggregated"] = dict(
client=most_recent_info["client"],
status=most_recent_info["status"],
timestamp=most_recent_info["timestamp"],
)
result["website"] = most_recent_info
return result |
This function assumes it's being called right after the presence object was updated,
and is not meant to be used on old presence data. | def format_legacy_presence_dict(
last_active_time: datetime, last_connected_time: datetime
) -> Dict[str, Any]:
"""
This function assumes it's being called right after the presence object was updated,
and is not meant to be used on old presence data.
"""
if (
last_active_time
+ timedelta(seconds=settings.PRESENCE_LEGACY_EVENT_OFFSET_FOR_ACTIVITY_SECONDS)
>= last_connected_time
):
status = UserPresence.LEGACY_STATUS_ACTIVE
timestamp = datetime_to_timestamp(last_active_time)
else:
status = UserPresence.LEGACY_STATUS_IDLE
timestamp = datetime_to_timestamp(last_connected_time)
# This field was never used by clients of the legacy API, so we
# just set it to a fixed value for API format compatibility.
pushable = False
return dict(client="website", status=status, timestamp=timestamp, pushable=pushable) |
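# Illustrative sketch (times assumed; helpers from the surrounding module): a user
# who connected just now but was last active half an hour ago is reported as idle,
# keyed on last_connected_time, assuming the configured activity offset is only a
# few minutes.
now = timezone_now()
legacy = format_legacy_presence_dict(
    last_active_time=now - timedelta(minutes=30), last_connected_time=now
)
# legacy["client"] == "website"
# legacy["status"] == UserPresence.LEGACY_STATUS_IDLE
# legacy["timestamp"] == datetime_to_timestamp(now)
# legacy["pushable"] is False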
This decorator should obviously be used only in a dev environment.
It works best when surrounding a function that you expect to be
called once. One strategy is to write a backend test and wrap the
test case with the profiled decorator.
You can run a single test case like this:
# edit zerver/tests/test_external.py and place @profiled above the test case below
./tools/test-backend zerver.tests.test_external.RateLimitTests.test_ratelimit_decrease
Then view the results like this:
./tools/show-profile-results test_ratelimit_decrease.profile | def profiled(func: Callable[ParamT, ReturnT]) -> Callable[ParamT, ReturnT]:
"""
This decorator should obviously be used only in a dev environment.
It works best when surrounding a function that you expect to be
called once. One strategy is to write a backend test and wrap the
test case with the profiled decorator.
You can run a single test case like this:
# edit zerver/tests/test_external.py and place @profiled above the test case below
./tools/test-backend zerver.tests.test_external.RateLimitTests.test_ratelimit_decrease
Then view the results like this:
./tools/show-profile-results test_ratelimit_decrease.profile
"""
@wraps(func)
def wrapped_func(*args: ParamT.args, **kwargs: ParamT.kwargs) -> ReturnT:
fn = func.__name__ + ".profile"
prof = cProfile.Profile()
retval = prof.runcall(func, *args, **kwargs)
prof.dump_stats(fn)
return retval
return wrapped_func |
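# Illustrative usage (hypothetical function): profile a one-off call; the stats
# are dumped to "<function name>.profile" in the current working directory.
@profiled
def count_to_ten_thousand() -> int:
    return sum(range(10_000))

count_to_ten_thousand()  # writes count_to_ten_thousand.profile
# Inspect with: ./tools/show-profile-results count_to_ten_thousand.profile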
Never use this function outside of the push-notifications
codepath. Most of our code knows how to get streams
up front in a more efficient manner. | def get_message_stream_name_from_database(message: Message) -> str:
"""
Never use this function outside of the push-notifications
codepath. Most of our code knows how to get streams
up front in a more efficient manner.
"""
stream_id = message.recipient.type_id
return Stream.objects.get(id=stream_id).name |
Take a payload in an unknown Zulip version's format, and return it in the current format. | def modernize_apns_payload(data: Mapping[str, Any]) -> Mapping[str, Any]:
"""Take a payload in an unknown Zulip version's format, and return it in the current format."""
# TODO this isn't super robust as is -- if a buggy remote server
# sends a malformed payload, we are likely to raise an exception.
if "message_ids" in data:
# The format sent by 1.6.0, from the earliest pre-1.6.0
# version with bouncer support up until 613d093d7 pre-1.7.0:
# 'alert': str, # just sender, and text about direct message/mention
# 'message_ids': List[int], # always just one
return {
"alert": data["alert"],
"badge": 0,
"custom": {
"zulip": {
"message_ids": data["message_ids"],
},
},
}
else:
# Something already compatible with the current format.
# `alert` may be a string, or a dict with `title` and `body`.
# In 1.7.0 and 1.7.1, before 0912b5ba8 pre-1.8.0, the only
# item in `custom.zulip` is `message_ids`.
return data |
Parse GCM options, supplying defaults, and raising an error if invalid.
The options permitted here form part of the Zulip notification
bouncer's API. They are:
`priority`: Passed through to GCM; see upstream doc linked below.
Zulip servers should always set this; when unset, we guess a value
based on the behavior of old server versions.
Including unrecognized options is an error.
For details on options' semantics, see this GCM upstream doc:
https://firebase.google.com/docs/cloud-messaging/http-server-ref
Returns `priority`. | def parse_gcm_options(options: Dict[str, Any], data: Dict[str, Any]) -> str:
"""
Parse GCM options, supplying defaults, and raising an error if invalid.
The options permitted here form part of the Zulip notification
bouncer's API. They are:
`priority`: Passed through to GCM; see upstream doc linked below.
Zulip servers should always set this; when unset, we guess a value
based on the behavior of old server versions.
Including unrecognized options is an error.
For details on options' semantics, see this GCM upstream doc:
https://firebase.google.com/docs/cloud-messaging/http-server-ref
Returns `priority`.
"""
priority = options.pop("priority", None)
if priority is None:
# An older server. Identify if this seems to be an actual notification.
if data.get("event") == "message":
priority = "high"
else: # `'event': 'remove'`, presumably
priority = "normal"
if priority not in ("normal", "high"):
raise JsonableError(
_(
"Invalid GCM option to bouncer: priority {priority!r}",
).format(priority=priority)
)
if options:
# We're strict about the API; there is no use case for a newer Zulip
# server talking to an older bouncer, so we only need to provide
# one-way compatibility.
raise JsonableError(
_(
"Invalid GCM options to bouncer: {options}",
).format(options=orjson.dumps(options).decode())
)
return priority |
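# Illustrative calls (payloads assumed): an explicit priority passes through,
# a missing priority is guessed from the event type, and unknown options raise.
assert parse_gcm_options({"priority": "high"}, {"event": "remove"}) == "high"
assert parse_gcm_options({}, {"event": "message"}) == "high"
assert parse_gcm_options({}, {"event": "remove"}) == "normal"
# parse_gcm_options({"unknown": 1}, {"event": "message"}) raises JsonableError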
Send a GCM message to the given devices.
See https://firebase.google.com/docs/cloud-messaging/http-server-ref
for the GCM upstream API which this talks to.
data: The JSON object (decoded) to send as the 'data' parameter of
the GCM message.
options: Additional options to control the GCM message sent.
For details, see `parse_gcm_options`. | def send_android_push_notification(
user_identity: UserPushIdentityCompat,
devices: Sequence[DeviceToken],
data: Dict[str, Any],
options: Dict[str, Any],
remote: Optional["RemoteZulipServer"] = None,
) -> int:
"""
Send a GCM message to the given devices.
See https://firebase.google.com/docs/cloud-messaging/http-server-ref
for the GCM upstream API which this talks to.
data: The JSON object (decoded) to send as the 'data' parameter of
the GCM message.
options: Additional options to control the GCM message sent.
For details, see `parse_gcm_options`.
"""
if not devices:
return 0
if not gcm_client:
logger.debug(
"Skipping sending a GCM push notification since "
"PUSH_NOTIFICATION_BOUNCER_URL and ANDROID_GCM_API_KEY are both unset"
)
return 0
if remote:
logger.info(
"GCM: Sending notification for remote user %s:%s to %d devices",
remote.uuid,
user_identity,
len(devices),
)
else:
logger.info(
"GCM: Sending notification for local user %s to %d devices", user_identity, len(devices)
)
reg_ids = [device.token for device in devices]
priority = parse_gcm_options(options, data)
try:
# See https://firebase.google.com/docs/cloud-messaging/http-server-ref .
# Two kwargs `retries` and `session` get eaten by `json_request`;
# the rest pass through to the GCM server.
#
# One initial request plus 2 retries, with 5-second timeouts,
# and expected 1 + 2 seconds (the gcm module jitters its
# backoff by ±50%, so worst case * 1.5) between them, totals
# 18s expected, up to 19.5s worst case.
res = gcm_client.json_request(
registration_ids=reg_ids,
priority=priority,
data=data,
retries=2,
session=FCMSession(),
)
except OSError:
logger.warning("Error while pushing to GCM", exc_info=True)
return 0
successfully_sent_count = 0
if res and "success" in res:
for reg_id, msg_id in res["success"].items():
logger.info("GCM: Sent %s as %s", reg_id, msg_id)
successfully_sent_count = len(res["success"].keys())
if remote:
assert settings.ZILENCER_ENABLED
DeviceTokenClass: Type[AbstractPushDeviceToken] = RemotePushDeviceToken
else:
DeviceTokenClass = PushDeviceToken
# res.canonical will contain results when there are duplicate registrations for the same
# device. The "canonical" registration is the latest registration made by the device.
# Ref: https://developer.android.com/google/gcm/adv.html#canonical
if "canonical" in res:
for reg_id, new_reg_id in res["canonical"].items():
if reg_id == new_reg_id:
# I'm not sure if this should happen. In any case, not really actionable.
logger.warning("GCM: Got canonical ref but it already matches our ID %s!", reg_id)
elif not DeviceTokenClass._default_manager.filter(
token=new_reg_id, kind=DeviceTokenClass.GCM
).exists():
# This case shouldn't happen; any time we get a canonical ref it should have been
# previously registered in our system.
#
# That said, recovery is easy: just update the current PDT object to use the new ID.
logger.warning(
"GCM: Got canonical ref %s replacing %s but new ID not registered! Updating.",
new_reg_id,
reg_id,
)
DeviceTokenClass._default_manager.filter(
token=reg_id, kind=DeviceTokenClass.GCM
).update(token=new_reg_id)
else:
# Since we know the new ID is registered in our system we can just drop the old one.
logger.info("GCM: Got canonical ref %s, dropping %s", new_reg_id, reg_id)
DeviceTokenClass._default_manager.filter(
token=reg_id, kind=DeviceTokenClass.GCM
).delete()
if "errors" in res:
for error, reg_ids in res["errors"].items():
if error in ["NotRegistered", "InvalidRegistration"]:
for reg_id in reg_ids:
logger.info("GCM: Removing %s", reg_id)
# We remove all entries for this token (There
# could be multiple for different Zulip servers).
DeviceTokenClass._default_manager.filter(
token=reg_id, kind=DeviceTokenClass.GCM
).delete()
else:
for reg_id in reg_ids:
logger.warning("GCM: Delivery to %s failed: %s", reg_id, error)
return successfully_sent_count |
True if and only if this server has configured a way to send push notifications. | def push_notifications_configured() -> bool:
"""True if and only if this server has configured a way to send push notifications."""
if (
uses_notification_bouncer()
and settings.ZULIP_ORG_KEY is not None
and settings.ZULIP_ORG_ID is not None
): # nocoverage
# We have the needed configuration to send push notifications through
# the bouncer. Better yet would be to confirm that this config actually
# works -- e.g., that we have ever successfully sent to the bouncer --
# but this is a good start.
return True
if settings.DEVELOPMENT and (has_apns_credentials() or has_gcm_credentials()): # nocoverage
# Since much of the notifications logic is platform-specific, the mobile
# developers often work on just one platform at a time, so we should
# only require one to be configured.
return True
elif has_apns_credentials() and has_gcm_credentials(): # nocoverage
# We have the needed configuration to send through APNs and GCM directly
# (i.e., we are the bouncer, presumably.) Again, assume it actually works.
return True
return False |
Called during startup of the push notifications worker to check
whether we expect mobile push notifications to work on this server
and update state accordingly. | def initialize_push_notifications() -> None:
"""Called during startup of the push notifications worker to check
whether we expect mobile push notifications to work on this server
and update state accordingly.
"""
if sends_notifications_directly():
# This server sends push notifications directly. Make sure we
# are set to report to clients that push notifications are
# enabled.
for realm in Realm.objects.filter(push_notifications_enabled=False):
do_set_realm_property(realm, "push_notifications_enabled", True, acting_user=None)
do_set_push_notifications_enabled_end_timestamp(realm, None, acting_user=None)
return
if not push_notifications_configured():
for realm in Realm.objects.filter(push_notifications_enabled=True):
do_set_realm_property(realm, "push_notifications_enabled", False, acting_user=None)
do_set_push_notifications_enabled_end_timestamp(realm, None, acting_user=None)
if settings.DEVELOPMENT and not settings.TEST_SUITE:
# Avoid unnecessary spam on development environment startup
return # nocoverage
logger.warning(
"Mobile push notifications are not configured.\n "
"See https://zulip.readthedocs.io/en/latest/"
"production/mobile-push-notifications.html"
)
return
if uses_notification_bouncer():
# If we're using the notification bouncer, check if we can
# actually send push notifications, and update our
# understanding of that state for each realm accordingly.
send_server_data_to_push_bouncer(consider_usage_statistics=False)
return
logger.warning( # nocoverage
"Mobile push notifications are not fully configured.\n "
"See https://zulip.readthedocs.io/en/latest/production/mobile-push-notifications.html"
)
for realm in Realm.objects.filter(push_notifications_enabled=True): # nocoverage
do_set_realm_property(realm, "push_notifications_enabled", False, acting_user=None)
do_set_push_notifications_enabled_end_timestamp(realm, None, acting_user=None) |
Common fields for all notification payloads. | def get_base_payload(user_profile: UserProfile) -> Dict[str, Any]:
"""Common fields for all notification payloads."""
data: Dict[str, Any] = {}
# These will let the app support logging into multiple realms and servers.
data["server"] = settings.EXTERNAL_HOST
data["realm_id"] = user_profile.realm.id
data["realm_uri"] = user_profile.realm.uri
data["realm_name"] = user_profile.realm.name
data["user_id"] = user_profile.id
return data |
Common fields for `message` payloads, for all platforms. | def get_message_payload(
user_profile: UserProfile,
message: Message,
mentioned_user_group_id: Optional[int] = None,
mentioned_user_group_name: Optional[str] = None,
can_access_sender: bool = True,
) -> Dict[str, Any]:
"""Common fields for `message` payloads, for all platforms."""
data = get_base_payload(user_profile)
# `sender_id` is preferred, but some existing versions use `sender_email`.
data["sender_id"] = message.sender.id
if not can_access_sender:
# A guest user can only receive a stream message from an
# inaccessible user as we allow unsubscribed users to send
# messages to streams. For direct messages, the guest gains
# access to the user if they were previously inaccessible.
data["sender_email"] = Address(
username=f"user{message.sender.id}", domain=get_fake_email_domain(message.realm.host)
).addr_spec
else:
data["sender_email"] = message.sender.email
data["time"] = datetime_to_timestamp(message.date_sent)
if mentioned_user_group_id is not None:
assert mentioned_user_group_name is not None
data["mentioned_user_group_id"] = mentioned_user_group_id
data["mentioned_user_group_name"] = mentioned_user_group_name
if message.recipient.type == Recipient.STREAM:
data["recipient_type"] = "stream"
data["stream"] = get_message_stream_name_from_database(message)
data["stream_id"] = message.recipient.type_id
data["topic"] = message.topic_name()
elif message.recipient.type == Recipient.DIRECT_MESSAGE_GROUP:
data["recipient_type"] = "private"
data["pm_users"] = huddle_users(message.recipient.id)
else: # Recipient.PERSONAL
data["recipient_type"] = "private"
return data |
On an iOS notification, this is the first bolded line. | def get_apns_alert_title(message: Message) -> str:
"""
On an iOS notification, this is the first bolded line.
"""
if message.recipient.type == Recipient.DIRECT_MESSAGE_GROUP:
recipients = get_display_recipient(message.recipient)
assert isinstance(recipients, list)
return ", ".join(sorted(r["full_name"] for r in recipients))
elif message.is_stream_message():
stream_name = get_message_stream_name_from_database(message)
return f"#{stream_name} > {message.topic_name()}"
# For 1:1 direct messages, we just show the sender name.
return message.sender.full_name |
On an iOS notification, this is the second bolded line. | def get_apns_alert_subtitle(
message: Message,
trigger: str,
user_profile: UserProfile,
mentioned_user_group_name: Optional[str] = None,
can_access_sender: bool = True,
) -> str:
"""
On an iOS notification, this is the second bolded line.
"""
sender_name = message.sender.full_name
if not can_access_sender:
# A guest user can only receive a stream message from an
# inaccessible user as we allow unsubscribed users to send
# messages to streams. For direct messages, the guest gains
# access to the user if they were previously inaccessible.
sender_name = str(UserProfile.INACCESSIBLE_USER_NAME)
if trigger == NotificationTriggers.MENTION:
if mentioned_user_group_name is not None:
return _("{full_name} mentioned @{user_group_name}:").format(
full_name=sender_name, user_group_name=mentioned_user_group_name
)
else:
return _("{full_name} mentioned you:").format(full_name=sender_name)
elif trigger in (
NotificationTriggers.TOPIC_WILDCARD_MENTION_IN_FOLLOWED_TOPIC,
NotificationTriggers.STREAM_WILDCARD_MENTION_IN_FOLLOWED_TOPIC,
NotificationTriggers.TOPIC_WILDCARD_MENTION,
NotificationTriggers.STREAM_WILDCARD_MENTION,
):
return _("{full_name} mentioned everyone:").format(full_name=sender_name)
elif message.recipient.type == Recipient.PERSONAL:
return ""
# For group direct messages, or regular messages to a stream,
# just use a colon to indicate this is the sender.
return sender_name + ":" |
A `message` payload for iOS, via APNs. | def get_message_payload_apns(
user_profile: UserProfile,
message: Message,
trigger: str,
mentioned_user_group_id: Optional[int] = None,
mentioned_user_group_name: Optional[str] = None,
can_access_sender: bool = True,
) -> Dict[str, Any]:
"""A `message` payload for iOS, via APNs."""
zulip_data = get_message_payload(
user_profile, message, mentioned_user_group_id, mentioned_user_group_name, can_access_sender
)
zulip_data.update(
message_ids=[message.id],
)
assert message.rendered_content is not None
with override_language(user_profile.default_language):
content, _ = truncate_content(get_mobile_push_content(message.rendered_content))
apns_data = {
"alert": {
"title": get_apns_alert_title(message),
"subtitle": get_apns_alert_subtitle(
message, trigger, user_profile, mentioned_user_group_name, can_access_sender
),
"body": content,
},
"sound": "default",
"badge": get_apns_badge_count(user_profile),
"custom": {"zulip": zulip_data},
}
return apns_data |
A `message` payload + options, for Android via GCM/FCM. | def get_message_payload_gcm(
user_profile: UserProfile,
message: Message,
mentioned_user_group_id: Optional[int] = None,
mentioned_user_group_name: Optional[str] = None,
can_access_sender: bool = True,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""A `message` payload + options, for Android via GCM/FCM."""
data = get_message_payload(
user_profile, message, mentioned_user_group_id, mentioned_user_group_name, can_access_sender
)
if not can_access_sender:
# A guest user can only receive a stream message from an
# inaccessible user as we allow unsubscribed users to send
# messages to streams. For direct messages, the guest gains
# access to the user if they were previously inaccessible.
sender_avatar_url = get_avatar_for_inaccessible_user()
sender_name = str(UserProfile.INACCESSIBLE_USER_NAME)
else:
sender_avatar_url = absolute_avatar_url(message.sender)
sender_name = message.sender.full_name
assert message.rendered_content is not None
with override_language(user_profile.default_language):
content, truncated = truncate_content(get_mobile_push_content(message.rendered_content))
data.update(
event="message",
zulip_message_id=message.id, # message_id is reserved for CCS
content=content,
content_truncated=truncated,
sender_full_name=sender_name,
sender_avatar_url=sender_avatar_url,
)
gcm_options = {"priority": "high"}
return data, gcm_options |
A `remove` payload + options, for Android via GCM/FCM. | def get_remove_payload_gcm(
user_profile: UserProfile,
message_ids: List[int],
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""A `remove` payload + options, for Android via GCM/FCM."""
gcm_payload = get_base_payload(user_profile)
gcm_payload.update(
event="remove",
zulip_message_ids=",".join(str(id) for id in message_ids),
# Older clients (all clients older than 2019-02-13) look only at
# `zulip_message_id` and ignore `zulip_message_ids`. Do our best.
zulip_message_id=message_ids[0],
)
gcm_options = {"priority": "normal"}
return gcm_payload, gcm_options |
This should be called when a message that previously triggered a
mobile push notification is read. This triggers a push to the
mobile app, when the message is read on the server, to remove the
message from the notification. | def handle_remove_push_notification(user_profile_id: int, message_ids: List[int]) -> None:
"""This should be called when a message that previously had a
mobile push notification executed is read. This triggers a push to the
mobile app, when the message is read on the server, to remove the
message from the notification.
"""
if not push_notifications_configured():
return
user_profile = get_user_profile_by_id(user_profile_id)
# We may no longer have access to the message here; for example,
# the user (1) got a message, (2) read the message in the web UI,
# and then (3) it was deleted. When trying to send the push
# notification for (2), after (3) has happened, there is no
# message to fetch -- but we nonetheless want to remove the mobile
# notification. Because of this, verification of access to
# the messages is skipped here.
# Because of this, no access to the Message objects should be
# done; they are treated as a list of opaque ints.
# APNs has a 4KB limit on the maximum size of messages, which
# translated to several hundred message IDs in one of these
# notifications. In rare cases, it's possible for someone to mark
# thousands of push notification eligible messages as read at
# once. We could handle this situation with a loop, but we choose
# to truncate instead to avoid extra network traffic, because it's
# very likely the user has manually cleared the notifications in
# their mobile device's UI anyway.
#
# When truncating, we keep only the newest N messages in this
# remove event. This is optimal because older messages are the
# ones most likely to have already been manually cleared at some
# point in the past.
#
# We choose 200 here because a 10-digit message ID plus a comma and
# space consume 12 bytes, and 12 x 200 = 2400 bytes is still well
# below the 4KB limit (leaving plenty of space for metadata).
MAX_APNS_MESSAGE_IDS = 200
truncated_message_ids = sorted(message_ids)[-MAX_APNS_MESSAGE_IDS:]
gcm_payload, gcm_options = get_remove_payload_gcm(user_profile, truncated_message_ids)
apns_payload = get_remove_payload_apns(user_profile, truncated_message_ids)
android_devices = list(
PushDeviceToken.objects.filter(user=user_profile, kind=PushDeviceToken.GCM).order_by("id")
)
apple_devices = list(
PushDeviceToken.objects.filter(user=user_profile, kind=PushDeviceToken.APNS).order_by("id")
)
if uses_notification_bouncer():
send_notifications_to_bouncer(
user_profile, apns_payload, gcm_payload, gcm_options, android_devices, apple_devices
)
else:
user_identity = UserPushIdentityCompat(user_id=user_profile_id)
android_successfully_sent_count = send_android_push_notification(
user_identity, android_devices, gcm_payload, gcm_options
)
apple_successfully_sent_count = send_apple_push_notification(
user_identity, apple_devices, apns_payload
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["mobile_pushes_sent::day"],
None,
timezone_now(),
increment=android_successfully_sent_count + apple_successfully_sent_count,
)
# We intentionally use the non-truncated message_ids here. We are
# assuming in this very rare case that the user has manually
# dismissed these notifications on the device side, and the server
# should no longer track them as outstanding notifications.
with transaction.atomic(savepoint=False):
UserMessage.select_for_update_query().filter(
user_profile_id=user_profile_id,
message_id__in=message_ids,
).update(flags=F("flags").bitand(~UserMessage.flags.active_mobile_push_notification)) |
missed_message is the event received by the
zerver.worker.missedmessage_mobile_notifications.PushNotificationWorker.consume function. | def handle_push_notification(user_profile_id: int, missed_message: Dict[str, Any]) -> None:
"""
missed_message is the event received by the
zerver.worker.missedmessage_mobile_notifications.PushNotificationWorker.consume function.
"""
if not push_notifications_configured():
return
user_profile = get_user_profile_by_id(user_profile_id)
if user_profile.is_bot: # nocoverage
# We don't expect to reach here for bot users. However, this code exists
# to find and throw away any pre-existing events in the queue while
# upgrading from versions before our notifiability logic was implemented.
# TODO/compatibility: This block can be removed when one can no longer
# upgrade from versions <= 4.0 to versions >= 5.0
logger.warning(
"Send-push-notification event found for bot user %s. Skipping.", user_profile_id
)
return
if not (
user_profile.enable_offline_push_notifications
or user_profile.enable_online_push_notifications
):
# BUG: Investigate why it's possible to get here.
return # nocoverage
with transaction.atomic(savepoint=False):
try:
(message, user_message) = access_message_and_usermessage(
user_profile, missed_message["message_id"], lock_message=True
)
except JsonableError:
if ArchivedMessage.objects.filter(id=missed_message["message_id"]).exists():
# If the cause is a race with the message being deleted,
# that's normal and we have no need to log an error.
return
logging.info(
"Unexpected message access failure handling push notifications: %s %s",
user_profile.id,
missed_message["message_id"],
)
return
if user_message is not None:
# If the user has read the message already, don't push-notify.
if user_message.flags.read or user_message.flags.active_mobile_push_notification:
return
# Otherwise, we mark the message as having an active mobile
# push notification, so that we can send revocation messages
# later.
user_message.flags.active_mobile_push_notification = True
user_message.save(update_fields=["flags"])
else:
# Users should only be getting push notifications into this
# queue for messages they haven't received if they're
# long-term idle; anything else is likely a bug.
if not user_profile.long_term_idle:
logger.error(
"Could not find UserMessage with message_id %s and user_id %s",
missed_message["message_id"],
user_profile_id,
exc_info=True,
)
return
trigger = missed_message["trigger"]
# TODO/compatibility: Translation code for the rename of
# `wildcard_mentioned` to `stream_wildcard_mentioned`.
# Remove this when one can no longer directly upgrade from 7.x to main.
if trigger == "wildcard_mentioned":
trigger = NotificationTriggers.STREAM_WILDCARD_MENTION # nocoverage
# TODO/compatibility: Translation code for the rename of
# `followed_topic_wildcard_mentioned` to `stream_wildcard_mentioned_in_followed_topic`.
# Remove this when one can no longer directly upgrade from 7.x to main.
if trigger == "followed_topic_wildcard_mentioned":
trigger = NotificationTriggers.STREAM_WILDCARD_MENTION_IN_FOLLOWED_TOPIC # nocoverage
# TODO/compatibility: Translation code for the rename of
# `private_message` to `direct_message`. Remove this when
# one can no longer directly upgrade from 7.x to main.
if trigger == "private_message":
trigger = NotificationTriggers.DIRECT_MESSAGE # nocoverage
# mentioned_user_group will be None if the user is personally mentioned
# regardless whether they are a member of the mentioned user group in the
# message or not.
mentioned_user_group_id = None
mentioned_user_group_name = None
mentioned_user_group_members_count = None
mentioned_user_group = get_mentioned_user_group([missed_message], user_profile)
if mentioned_user_group is not None:
mentioned_user_group_id = mentioned_user_group.id
mentioned_user_group_name = mentioned_user_group.name
mentioned_user_group_members_count = mentioned_user_group.members_count
# Soft reactivate if pushing to a long_term_idle user that is personally mentioned
soft_reactivate_if_personal_notification(
user_profile, {trigger}, mentioned_user_group_members_count
)
if message.is_stream_message():
# This will almost always be True. The corner case where you
# can be receiving a message from a user you cannot access
# involves your being a guest user whose access is restricted
# by a can_access_all_users_group policy, and you can't access
# the sender because they are sending a message to a public
# stream that you are subscribed to but they are not.
can_access_sender = check_can_access_user(message.sender, user_profile)
else:
# For private messages, the recipient will gain access
# to the sender if they did not have access previously.
can_access_sender = True
apns_payload = get_message_payload_apns(
user_profile,
message,
trigger,
mentioned_user_group_id,
mentioned_user_group_name,
can_access_sender,
)
gcm_payload, gcm_options = get_message_payload_gcm(
user_profile, message, mentioned_user_group_id, mentioned_user_group_name, can_access_sender
)
logger.info("Sending push notifications to mobile clients for user %s", user_profile_id)
android_devices = list(
PushDeviceToken.objects.filter(user=user_profile, kind=PushDeviceToken.GCM).order_by("id")
)
apple_devices = list(
PushDeviceToken.objects.filter(user=user_profile, kind=PushDeviceToken.APNS).order_by("id")
)
if uses_notification_bouncer():
send_notifications_to_bouncer(
user_profile, apns_payload, gcm_payload, gcm_options, android_devices, apple_devices
)
return
logger.info(
"Sending mobile push notifications for local user %s: %s via FCM devices, %s via APNs devices",
user_profile_id,
len(android_devices),
len(apple_devices),
)
user_identity = UserPushIdentityCompat(user_id=user_profile.id)
apple_successfully_sent_count = send_apple_push_notification(
user_identity, apple_devices, apns_payload
)
android_successfully_sent_count = send_android_push_notification(
user_identity, android_devices, gcm_payload, gcm_options
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["mobile_pushes_sent::day"],
None,
timezone_now(),
increment=apple_successfully_sent_count + android_successfully_sent_count,
) |
This function optimizes searches of the form
`user_profile_id in (1, 2, 3, 4)` by quickly
building the where clauses. Profiling shows significant
speedups over the normal Django-based approach.
Use this very carefully! Also, the caller should
guard against empty lists of user_ids. | def query_for_ids(
query: ValuesQuerySet[ModelT, RowT],
user_ids: List[int],
field: str,
) -> ValuesQuerySet[ModelT, RowT]:
"""
This function optimizes searches of the form
`user_profile_id in (1, 2, 3, 4)` by quickly
building the where clauses. Profiling shows significant
speedups over the normal Django-based approach.
Use this very carefully! Also, the caller should
guard against empty lists of user_ids.
"""
assert user_ids
clause = f"{field} IN %s"
query = query.extra(
where=[clause],
params=(tuple(user_ids),),
)
return query |
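# Illustrative usage (model and column name assumed): narrow a values() queryset
# to a known, non-empty list of user IDs via the raw SQL IN clause above.
user_ids = [1, 2, 3]
rows = UserProfile.objects.values("id", "full_name")
rows = query_for_ids(rows, user_ids, "zerver_userprofile.id")
# Equivalent in intent to .filter(id__in=user_ids), but built with a raw where
# clause, which profiling showed to be faster for large ID lists.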
Rate-limits the user. Raises a RateLimitedError exception if the user
has been rate limited; otherwise returns and annotates the request with
the rate limit information. | def rate_limit_user(request: HttpRequest, user: UserProfile, domain: str) -> None:
"""Returns whether or not a user was rate limited. Will raise a RateLimitedError exception
if the user has been rate limited, otherwise returns and modifies request to contain
the rate limit information"""
if not should_rate_limit(request):
return
RateLimitedUser(user, domain=domain).rate_limit_request(request) |
While it does actually send the notice, this function has a lot of
code and comments around error handling for the push notifications
bouncer. There are several classes of failures, each with its own
potential solution:
* Network errors with requests.request. We raise an exception to signal
it to the callers.
* 500 errors from the push bouncer or other unexpected responses;
we don't try to parse the response, but do make clear the cause.
* 400 errors from the push bouncer. Here there are 2 categories:
Our server failed to connect to the push bouncer (should throw)
vs. client-side errors like an invalid token. | def send_to_push_bouncer(
method: str,
endpoint: str,
post_data: Union[bytes, Mapping[str, Union[str, int, None, bytes]]],
extra_headers: Mapping[str, str] = {},
) -> Dict[str, object]:
"""While it does actually send the notice, this function has a lot of
code and comments around error handling for the push notifications
bouncer. There are several classes of failures, each with its own
potential solution:
* Network errors with requests.request. We raise an exception to signal
it to the callers.
* 500 errors from the push bouncer or other unexpected responses;
we don't try to parse the response, but do make clear the cause.
* 400 errors from the push bouncer. Here there are 2 categories:
Our server failed to connect to the push bouncer (should throw)
vs. client-side errors like an invalid token.
"""
assert settings.PUSH_NOTIFICATION_BOUNCER_URL is not None
assert settings.ZULIP_ORG_ID is not None
assert settings.ZULIP_ORG_KEY is not None
url = urljoin(settings.PUSH_NOTIFICATION_BOUNCER_URL, "/api/v1/remotes/" + endpoint)
api_auth = requests.auth.HTTPBasicAuth(settings.ZULIP_ORG_ID, settings.ZULIP_ORG_KEY)
headers = {"User-agent": f"ZulipServer/{ZULIP_VERSION}"}
headers.update(extra_headers)
if endpoint == "server/analytics":
# Uploading audit log and/or analytics data can require the
# bouncer to do a significant chunk of work in a few
# situations; since this occurs in background jobs, set a long
# timeout.
session = PushBouncerSession(timeout=90)
else:
session = PushBouncerSession()
try:
res = session.request(
method,
url,
data=post_data,
auth=api_auth,
verify=True,
headers=headers,
)
except (
requests.exceptions.Timeout,
requests.exceptions.SSLError,
requests.exceptions.ConnectionError,
) as e:
raise PushNotificationBouncerRetryLaterError(
f"{type(e).__name__} while trying to connect to push notification bouncer"
)
if res.status_code >= 500:
# 5xx's should be resolved by the people who run the push
# notification bouncer service, and they'll get an appropriate
# error notification from the server. We raise an exception to signal
# to the callers that the attempt failed and they can retry.
error_msg = f"Received {res.status_code} from push notification bouncer"
logging.warning(error_msg)
raise PushNotificationBouncerServerError(error_msg)
elif res.status_code >= 400:
# If JSON parsing errors, just let that exception happen
result_dict = orjson.loads(res.content)
msg = result_dict["msg"]
if "code" in result_dict and result_dict["code"] == "INVALID_ZULIP_SERVER":
# Invalid Zulip server credentials should email this server's admins
raise PushNotificationBouncerError(
_("Push notifications bouncer error: {error}").format(error=msg)
)
elif "code" in result_dict and result_dict["code"] == "PUSH_NOTIFICATIONS_DISALLOWED":
from zerver.lib.push_notifications import PushNotificationsDisallowedByBouncerError
raise PushNotificationsDisallowedByBouncerError(reason=msg)
elif (
endpoint == "push/test_notification"
and "code" in result_dict
and result_dict["code"] == "INVALID_REMOTE_PUSH_DEVICE_TOKEN"
):
# This error from the notification debugging endpoint should just be directly
# communicated to the device.
# TODO: Extend this to use a more general mechanism when we add more such error responses.
from zerver.lib.push_notifications import InvalidRemotePushDeviceTokenError
raise InvalidRemotePushDeviceTokenError
elif (
endpoint == "server/billing"
and "code" in result_dict
and result_dict["code"] == "MISSING_REMOTE_REALM"
): # nocoverage
# The callers requesting this endpoint want the exception to propagate
# so they can catch it.
raise MissingRemoteRealmError
elif (
endpoint == "server/billing"
and "code" in result_dict
and result_dict["code"] == "REMOTE_REALM_SERVER_MISMATCH_ERROR"
): # nocoverage
# The callers requesting this endpoint want the exception to propagate
# so they can catch it.
raise RemoteRealmServerMismatchError
else:
# But most other errors coming from the push bouncer
# server are client errors (e.g. never-registered token)
# and should be handled as such.
raise JsonableError(msg)
elif res.status_code != 200:
# Anything else is unexpected and likely suggests a bug in
# this version of Zulip, so we throw an exception that will
# email the server admins.
raise PushNotificationBouncerError(
f"Push notification bouncer returned unexpected status code {res.status_code}"
)
# If we don't throw an exception, it's a successful bounce!
return orjson.loads(res.content) |
This should only be needed in middleware; in app code, just raise.
When app code raises a JsonableError, the JsonErrorHandler
middleware takes care of transforming it into a response by
calling this function. | def json_response_from_error(exception: JsonableError) -> MutableJsonResponse:
"""
This should only be needed in middleware; in app code, just raise.
When app code raises a JsonableError, the JsonErrorHandler
middleware takes care of transforming it into a response by
calling this function.
"""
response_type = "error"
if 200 <= exception.http_status_code < 300:
response_type = "success"
response = json_response(
response_type, msg=exception.msg, data=exception.data, status=exception.http_status_code
)
for header, value in exception.extra_headers.items():
response[header] = value
return response |
Patched version of the standard Django never_cache_responses
decorator that adds headers to a response so that it will never be
cached, unless the view code has already set a Cache-Control
header. | def default_never_cache_responses(
view_func: Callable[Concatenate[HttpRequest, ParamT], HttpResponse],
) -> Callable[Concatenate[HttpRequest, ParamT], HttpResponse]:
"""Patched version of the standard Django never_cache_responses
decorator that adds headers to a response so that it will never be
cached, unless the view code has already set a Cache-Control
header.
"""
@wraps(view_func)
def _wrapped_view_func(
request: HttpRequest, /, *args: ParamT.args, **kwargs: ParamT.kwargs
) -> HttpResponse:
response = view_func(request, *args, **kwargs)
if response.has_header("Cache-Control"):
return response
add_never_cache_headers(response)
return response
return _wrapped_view_func |
Helper for REST API request dispatch. The rest_dispatch_kwargs
parameter is expected to be a dictionary mapping HTTP methods to
a mix of view functions and (view_function, {view_flags}) tuples.
* Returns an error HttpResponse for unsupported HTTP methods.
* Otherwise, returns a tuple containing the view function
corresponding to the request's HTTP method, as well as the
appropriate set of view flags.
HACK: Mutates the passed rest_dispatch_kwargs, removing the HTTP
method details but leaving any other parameters for the caller to
pass directly to the view function. We should see if we can remove
this feature; it's not clear it's actually used. | def get_target_view_function_or_response(
request: HttpRequest, rest_dispatch_kwargs: Dict[str, object]
) -> Union[Tuple[Callable[..., HttpResponse], Set[str]], HttpResponse]:
"""Helper for REST API request dispatch. The rest_dispatch_kwargs
parameter is expected to be a dictionary mapping HTTP methods to
a mix of view functions and (view_function, {view_flags}) tuples.
* Returns an error HttpResponse for unsupported HTTP methods.
* Otherwise, returns a tuple containing the view function
corresponding to the request's HTTP method, as well as the
appropriate set of view flags.
HACK: Mutates the passed rest_dispatch_kwargs, removing the HTTP
method details but leaving any other parameters for the caller to
pass directly to the view function. We should see if we can remove
this feature; it's not clear it's actually used.
"""
supported_methods: Dict[str, object] = {}
request_notes = RequestNotes.get_notes(request)
if request_notes.saved_response is not None:
# For completing long-polled Tornado requests, we skip the
# view function logic and just return the response.
return request_notes.saved_response
# The list() duplicates rest_dispatch_kwargs, since this loop
# mutates the original.
for arg in list(rest_dispatch_kwargs):
if arg in METHODS:
supported_methods[arg] = rest_dispatch_kwargs[arg]
del rest_dispatch_kwargs[arg]
if "GET" in supported_methods:
supported_methods.setdefault("HEAD", supported_methods["GET"])
if request.method == "OPTIONS":
response = HttpResponse(status=204) # No content
response["Allow"] = ", ".join(sorted(supported_methods.keys()))
return response
# Override requested method if magic method=??? parameter exists
method_to_use = request.method
if request.POST and "method" in request.POST:
method_to_use = request.POST["method"]
if method_to_use in supported_methods:
entry = supported_methods[method_to_use]
if isinstance(entry, tuple):
handler, view_flags = entry
assert callable(handler)
assert isinstance(view_flags, set)
return handler, view_flags
assert callable(entry)
return entry, set()
return json_method_not_allowed(list(supported_methods.keys())) |
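To make the expected shape of rest_dispatch_kwargs concrete, here is a minimal self-contained sketch; the handlers are placeholders, not real Zulip views.

from typing import Dict

def get_presence(request: object) -> str:
    return "presence data"

def update_presence(request: object) -> str:
    return "presence updated"

# Values are either a bare view function or a (view, {flags}) tuple; any
# keys that are not HTTP methods are left in place for the caller.
rest_dispatch_kwargs: Dict[str, object] = {
    "GET": get_presence,
    "POST": (update_presence, {"allow_anonymous_user_web"}),
}

# Normalizing one entry the same way the helper does:
entry = rest_dispatch_kwargs["POST"]
if isinstance(entry, tuple):
    handler, view_flags = entry
else:
    handler, view_flags = entry, set()
assert callable(handler) and view_flags == {"allow_anonymous_user_web"}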
Dispatch to a REST API endpoint.
Authentication is verified in the following ways:
* for paths beginning with /api, HTTP basic auth
* for paths beginning with /json (used by the web client), the session token
Unauthenticated requests may use this endpoint only with the
allow_anonymous_user_web view flag.
This calls the function named in kwargs[request.method], if that request
method is supported, and after wrapping that function to:
* protect against CSRF (if the user is already authenticated through
a Django session)
* authenticate via an API key (otherwise)
* coerce PUT/PATCH/DELETE into having POST-like semantics for
retrieving variables
Any keyword args that are *not* HTTP methods are passed through to the
target function.
Never make a urls.py pattern put user input into a variable called GET, POST,
etc, as that is where we route HTTP verbs to target functions. | def rest_dispatch(request: HttpRequest, /, **kwargs: object) -> HttpResponse:
"""Dispatch to a REST API endpoint.
Authentication is verified in the following ways:
* for paths beginning with /api, HTTP basic auth
* for paths beginning with /json (used by the web client), the session token
Unauthenticated requests may use this endpoint only with the
allow_anonymous_user_web view flag.
This calls the function named in kwargs[request.method], if that request
method is supported, and after wrapping that function to:
* protect against CSRF (if the user is already authenticated through
a Django session)
* authenticate via an API key (otherwise)
* coerce PUT/PATCH/DELETE into having POST-like semantics for
retrieving variables
Any keyword args that are *not* HTTP methods are passed through to the
target function.
Never make a urls.py pattern put user input into a variable called GET, POST,
etc, as that is where we route HTTP verbs to target functions.
"""
result = get_target_view_function_or_response(request, kwargs)
if isinstance(result, HttpResponse):
return result
target_function, view_flags = result
request_notes = RequestNotes.get_notes(request)
# Set request_notes.query for update_activity_user(), which is called
# by some of the later wrappers.
request_notes.query = target_function.__name__
# We want to support authentication by both cookies (web client)
# and API keys (API clients). In the former case, we want to
# do a check to ensure that CSRF etc is honored, but in the latter
# we can skip all of that.
#
# Security implications of this portion of the code are minimal,
# as we should worst-case fail closed if we miscategorize a request.
# For some special views (e.g. serving a file that has been
# uploaded), we support using the same URL for web and API clients.
if "override_api_url_scheme" in view_flags and "Authorization" in request.headers:
# This request uses standard API based authentication.
# For override_api_url_scheme views, we skip our normal
# rate limiting, because there are good reasons clients
# might need to (e.g.) request a large number of uploaded
# files or avatars in quick succession.
target_function = authenticated_rest_api_view(skip_rate_limiting=True)(target_function)
elif "override_api_url_scheme" in view_flags and request.GET.get("api_key") is not None:
# This request uses legacy API authentication. We
# unfortunately need that in the React Native mobile apps,
# because there's no way to set the Authorization header in
# React Native. See last block for rate limiting notes.
target_function = authenticated_uploads_api_view(skip_rate_limiting=True)(target_function)
# /json views (web client) validate with a session token (cookie)
elif not request.path.startswith("/api") and request.user.is_authenticated:
# Authenticated via sessions framework, only CSRF check needed
auth_kwargs = {}
if "override_api_url_scheme" in view_flags:
auth_kwargs["skip_rate_limiting"] = True
target_function = csrf_protect(authenticated_json_view(target_function, **auth_kwargs))
# most clients (mobile, bots, etc) use HTTP basic auth and REST calls, where instead of
# username:password, we use email:apiKey
elif request.path.startswith("/api") and "Authorization" in request.headers:
# Wrap function with decorator to authenticate the user before
# proceeding
target_function = authenticated_rest_api_view(
allow_webhook_access="allow_incoming_webhooks" in view_flags,
)(target_function)
elif (
request.path.startswith(("/json", "/avatar", "/user_uploads", "/thumbnail"))
and "allow_anonymous_user_web" in view_flags
):
# For endpoints that support anonymous web access, we do that.
# TODO: Allow /api calls when this is stable enough.
target_function = csrf_protect(public_json_view(target_function))
else:
# Otherwise, throw an authentication error; our middleware
# will generate the appropriate HTTP response.
raise MissingAuthenticationError
if request.method in ["DELETE", "PATCH", "PUT"]:
# process_as_post needs to be the outer decorator, because
# otherwise we might access and thus cache a value for
# request.POST.
target_function = process_as_post(target_function)
return target_function(request, **kwargs) |
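A hypothetical urls.py entry showing how rest_dispatch receives its per-method kwargs; Zulip's real URL configuration goes through helper wrappers, and the stub views here are placeholders.

from django.http import HttpRequest, HttpResponse
from django.urls import path

def get_settings(request: HttpRequest) -> HttpResponse:
    return HttpResponse("settings")

def update_settings(request: HttpRequest) -> HttpResponse:
    return HttpResponse("updated")

urlpatterns = [
    # GET (and implicitly HEAD) dispatch to one view, PATCH to another;
    # the extra dict is passed to rest_dispatch as **kwargs.
    path("json/settings", rest_dispatch, {"GET": get_settings, "PATCH": update_settings}),
]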
Core helper for bulk moving rows between a table and its archive table | def move_rows(
base_model: Type[Model],
raw_query: SQL,
*,
src_db_table: Optional[str] = None,
returning_id: bool = False,
**kwargs: Composable,
) -> List[int]:
"""Core helper for bulk moving rows between a table and its archive table"""
if src_db_table is None:
# Use base_model's db_table unless otherwise specified.
src_db_table = base_model._meta.db_table
fields = [field for field in base_model._meta.fields if field not in EXCLUDE_FIELDS]
src_fields = [Identifier(src_db_table, field.column) for field in fields]
dst_fields = [Identifier(field.column) for field in fields]
with connection.cursor() as cursor:
cursor.execute(
raw_query.format(
src_fields=SQL(",").join(src_fields), dst_fields=SQL(",").join(dst_fields), **kwargs
)
)
if returning_id:
return [id for (id,) in cursor.fetchall()] # return list of row ids
else:
return [] |
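A hedged sketch of a caller, assuming a Zulip Django environment; the archive query is illustrative rather than the exact SQL in Zulip's retention code. {src_fields} and {dst_fields} are filled in by move_rows itself, and every other placeholder must be supplied as a psycopg2 Composable.

from psycopg2.sql import SQL, Literal

from zerver.models import Message  # assumed import path

archive_query = SQL(
    """
    INSERT INTO zerver_archivedmessage ({dst_fields})
    SELECT {src_fields}
    FROM zerver_message
    WHERE zerver_message.id <= {max_id}
    RETURNING id
    """
)
# base_model supplies the column list (minus EXCLUDE_FIELDS); returning_id=True
# makes the helper collect the ids produced by INSERT ... RETURNING.
moved_ids = move_rows(Message, archive_query, returning_id=True, max_id=Literal(12345))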
This function constructs a list of (realm, streams_of_the_realm) tuples
where each realm is a Realm that requires calling the archiving functions on it,
and streams_of_the_realm is a list of streams of the realm to call archive_stream_messages with.
The purpose of this is performance - for servers with thousands of realms, it is important
to fetch all this data in bulk. | def get_realms_and_streams_for_archiving() -> List[Tuple[Realm, List[Stream]]]:
"""
This function constructs a list of (realm, streams_of_the_realm) tuples
where each realm is a Realm that requires calling the archiving functions on it,
and streams_of_the_realm is a list of streams of the realm to call archive_stream_messages with.
The purpose of this is performance - for servers with thousands of realms, it is important
to fetch all this data in bulk.
"""
realm_id_to_realm = {}
realm_id_to_streams_list: Dict[int, List[Stream]] = {}
# All realms with a retention policy set qualify for archiving:
for realm in Realm.objects.exclude(message_retention_days=-1):
realm_id_to_realm[realm.id] = realm
realm_id_to_streams_list[realm.id] = []
# Now we find all streams that require archiving.
# First category are streams in retention-enabled realms,
# that don't have retention explicitly disabled (through the value -1).
query_one = (
Stream.objects.exclude(message_retention_days=-1)
.exclude(realm__message_retention_days=-1)
.select_related("realm", "recipient")
)
# Second category are streams that are in realms without a realm-wide retention policy,
# but have their own stream-specific policy enabled.
query_two = (
Stream.objects.filter(realm__message_retention_days=-1)
.exclude(message_retention_days__isnull=True)
.exclude(message_retention_days=-1)
.select_related("realm", "recipient")
)
query = query_one.union(query_two)
for stream in query:
realm = stream.realm
realm_id_to_realm[realm.id] = realm
if realm.id not in realm_id_to_streams_list:
realm_id_to_streams_list[realm.id] = []
realm_id_to_streams_list[realm.id].append(stream)
return [
(realm_id_to_realm[realm_id], realm_id_to_streams_list[realm_id])
for realm_id in realm_id_to_realm
] |
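A minimal sketch of the consuming loop; the archive_stream_messages call shape follows the docstring above, and chunking plus the handling of other message types are omitted.

for realm, streams in get_realms_and_streams_for_archiving():
    # A realm with a realm-wide policy can appear with an empty stream list;
    # otherwise streams either inherit the realm policy or set their own.
    archive_stream_messages(realm, streams)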
Utility function for calling in the Django shell if a stream's policy was
set to something too aggressive and the administrator wants to restore
the messages deleted as a result. | def restore_retention_policy_deletions_for_stream(stream: Stream) -> None:
"""
Utility function for calling in the Django shell if a stream's policy was
set to something too aggressive and the administrator wants to restore
the messages deleted as a result.
"""
relevant_transactions = ArchiveTransaction.objects.filter(
archivedmessage__recipient=stream.recipient, type=ArchiveTransaction.RETENTION_POLICY_BASED
).distinct("id")
restore_data_from_archive_by_transactions(list(relevant_transactions)) |
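A hypothetical invocation from `manage.py shell`; the realm subdomain and stream name are made up.

from zerver.models import get_realm, get_stream  # assumed import paths

realm = get_realm("zulip")
stream = get_stream("design", realm)
# Restores every message archived for this stream by retention-policy runs.
restore_retention_policy_deletions_for_stream(stream)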
This function deletes archived data that was archived at least
settings.ARCHIVED_DATA_VACUUMING_DELAY_DAYS days ago.
It works by deleting ArchiveTransaction objects that are
sufficiently old. We've configured most archive tables, like
ArchivedMessage, with on_delete=CASCADE, so that deleting an
ArchiveTransaction entails deleting associated objects, including
ArchivedMessage, ArchivedUserMessage, ArchivedReaction.
The exception to this rule is ArchivedAttachment. Archive
attachment objects that were only referenced by ArchivedMessage
objects that have now been deleted will be left with an empty
`.messages` relation. A separate step,
delete_old_unclaimed_attachments, will delete those
ArchivedAttachment objects (and delete the files themselves from
the storage). | def clean_archived_data() -> None:
"""This function deletes archived data that was archived at least
settings.ARCHIVED_DATA_VACUUMING_DELAY_DAYS days ago.
It works by deleting ArchiveTransaction objects that are
sufficiently old. We've configured most archive tables, like
ArchivedMessage, with on_delete=CASCADE, so that deleting an
ArchiveTransaction entails deleting associated objects, including
ArchivedMessage, ArchivedUserMessage, ArchivedReaction.
The exception to this rule is ArchivedAttachment. Archive
attachment objects that were only referenced by ArchivedMessage
objects that have now been deleted will be left with an empty
`.messages` relation. A separate step,
delete_old_unclaimed_attachments, will delete those
ArchivedAttachment objects (and delete the files themselves from
the storage).
"""
logger.info("Cleaning old archive data.")
check_date = timezone_now() - timedelta(days=settings.ARCHIVED_DATA_VACUUMING_DELAY_DAYS)
# Associated archived objects will get deleted through the on_delete=CASCADE property:
count = 0
transaction_ids = list(
ArchiveTransaction.objects.filter(timestamp__lt=check_date).values_list("id", flat=True)
)
while len(transaction_ids) > 0:
transaction_block = transaction_ids[0:TRANSACTION_DELETION_BATCH_SIZE]
transaction_ids = transaction_ids[TRANSACTION_DELETION_BATCH_SIZE:]
ArchiveTransaction.objects.filter(id__in=transaction_block).delete()
count += len(transaction_block)
logger.info("Deleted %s old ArchiveTransactions.", count) |
Registered as GET_EXTRA_MODEL_FILTER_KWARGS_GETTER in our
SCIM configuration.
Returns a function which generates additional kwargs
to add to QuerySet's .filter() when fetching a UserProfile
corresponding to the requested SCIM User from the database.
It's *crucial* for security that we filter by realm_id (based on
the subdomain of the request) to prevent a SCIM client authorized
for subdomain X from being able to interact with all of the Users
on the entire server.
This should be extended for Groups when implementing them by
checking the `model` parameter; because we only support
UserProfiles, such a check is unnecessary. | def get_extra_model_filter_kwargs_getter(
model: Type[models.Model],
) -> Callable[[HttpRequest, Any, Any], Dict[str, object]]:
"""Registered as GET_EXTRA_MODEL_FILTER_KWARGS_GETTER in our
SCIM configuration.
Returns a function which generates additional kwargs
to add to QuerySet's .filter() when fetching a UserProfile
corresponding to the requested SCIM User from the database.
It's *crucial* for security that we filter by realm_id (based on
the subdomain of the request) to prevent a SCIM client authorized
for subdomain X from being able to interact with all of the Users
on the entire server.
This should be extended for Groups when implementing them by
checking the `model` parameter; because we only support
UserProfiles, such a check is unnecessary.
"""
def get_extra_filter_kwargs(
request: HttpRequest, *args: Any, **kwargs: Any
) -> Dict[str, object]:
realm = RequestNotes.get_notes(request).realm
assert realm is not None
return {"realm_id": realm.id, "is_bot": False}
return get_extra_filter_kwargs |
Used as the base url for constructing the Location of a SCIM resource.
Since SCIM synchronization is scoped to an individual realm, we
need these locations to be namespaced within the realm's domain
namespace, which is conveniently accessed via realm.uri. | def base_scim_location_getter(request: HttpRequest, *args: Any, **kwargs: Any) -> str:
"""Used as the base url for constructing the Location of a SCIM resource.
Since SCIM synchronization is scoped to an individual realm, we
need these locations to be namespaced within the realm's domain
namespace, which is conveniently accessed via realm.uri.
"""
realm = RequestNotes.get_notes(request).realm
assert realm is not None
return realm.uri |
Unlike most scheduled emails, invitation emails don't have an
existing user object to key off of, so we filter by address here. | def clear_scheduled_invitation_emails(email: str) -> None:
"""Unlike most scheduled emails, invitation emails don't have an
existing user object to key off of, so we filter by address here."""
items = ScheduledEmail.objects.filter(
address__iexact=email, type=ScheduledEmail.INVITATION_REMINDER
)
items.delete() |
Helper for `manage.py send_custom_email`.
Can be used directly from a management shell with
send_custom_email(user_profile_list, dry_run=False, options=dict(
markdown_template_path="/path/to/markdown/file.md",
subject="Email subject",
from_name="Sender Name")
) | def send_custom_email(
users: QuerySet[UserProfile],
*,
dry_run: bool,
options: Dict[str, str],
add_context: Optional[Callable[[Dict[str, object], UserProfile], None]] = None,
distinct_email: bool = False,
) -> QuerySet[UserProfile]:
"""
Helper for `manage.py send_custom_email`.
Can be used directly from a management shell with
send_custom_email(user_profile_list, dry_run=False, options=dict(
markdown_template_path="/path/to/markdown/file.md",
subject="Email subject",
from_name="Sender Name")
)
"""
email_sender = custom_email_sender(**options, dry_run=dry_run)
users = users.select_related("realm")
if distinct_email:
users = (
users.annotate(lower_email=Lower("delivery_email"))
.distinct("lower_email")
.order_by("lower_email", "id")
)
else:
users = users.order_by("id")
for user_profile in users:
context: Dict[str, object] = {
"realm": user_profile.realm,
"realm_string_id": user_profile.realm.string_id,
"realm_uri": user_profile.realm.uri,
"realm_name": user_profile.realm.name,
}
if add_context is not None:
add_context(context, user_profile)
email_sender(
to_user_id=user_profile.id,
context=context,
)
if dry_run:
break
return users |
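A hedged example of a management-shell invocation with an add_context hook; the subject, from_name, and context key are illustrative.

from typing import Dict

from zerver.models import UserProfile  # assumed import path

def add_owner_flag(context: Dict[str, object], user: UserProfile) -> None:
    # Extra template variable; the key name is made up for this sketch.
    context["is_owner"] = user.is_realm_owner

send_custom_email(
    UserProfile.objects.filter(is_bot=False, is_active=True),
    dry_run=True,  # per the loop above, stops after rendering for the first user
    options={
        "markdown_template_path": "/path/to/markdown/file.md",
        "subject": "Scheduled maintenance",
        "from_name": "Zulip Administrator",
    },
    add_context=add_owner_flag,
)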
The purpose of this function is to log (potential) config errors,
but without raising an exception. | def log_email_config_errors() -> None:
"""
The purpose of this function is to log (potential) config errors,
but without raising an exception.
"""
if settings.EMAIL_HOST_USER and settings.EMAIL_HOST_PASSWORD is None:
logger.error(
"An SMTP username was set (EMAIL_HOST_USER), but password is unset (EMAIL_HOST_PASSWORD). "
"To disable SMTP authentication, set EMAIL_HOST_USER to an empty string."
) |
This function takes a soft-deactivated user, and computes and adds
to the database any UserMessage rows that were not created while
the user was soft-deactivated. The end result is that from the
perspective of the message database, it should be impossible to
tell that the user was soft-deactivated at all.
At a high level, the algorithm is as follows:
* Find all the streams that the user was at any time a subscriber
of when or after they were soft-deactivated (`recipient_ids`
below).
* Find all the messages sent to those streams since the user was
soft-deactivated. This will be a superset of the target
UserMessages we need to create in two ways: (1) some UserMessage
rows will have already been created in do_send_messages because
the user had a nonzero set of flags (the fact that we do so in
do_send_messages simplifies things considerably, since it means
we don't need to inspect message content to look for things like
mentions here), and (2) the user might not have been subscribed
to all of the streams in recipient_ids for the entire time
window.
* Correct the list from the previous step by excluding those with
existing UserMessage rows.
* Correct the list from the previous step by excluding those
where the user wasn't subscribed at the time, using the
RealmAuditLog data to determine exactly when the user was
subscribed/unsubscribed.
* Create the UserMessage rows.
For further documentation, see:
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation | def add_missing_messages(user_profile: UserProfile) -> None:
"""This function takes a soft-deactivated user, and computes and adds
to the database any UserMessage rows that were not created while
the user was soft-deactivated. The end result is that from the
perspective of the message database, it should be impossible to
tell that the user was soft-deactivated at all.
At a high level, the algorithm is as follows:
* Find all the streams that the user was at any time a subscriber
of when or after they were soft-deactivated (`recipient_ids`
below).
* Find all the messages sent to those streams since the user was
soft-deactivated. This will be a superset of the target
UserMessages we need to create in two ways: (1) some UserMessage
rows will have already been created in do_send_messages because
the user had a nonzero set of flags (the fact that we do so in
do_send_messages simplifies things considerably, since it means
we don't need to inspect message content to look for things like
mentions here), and (2) the user might not have been subscribed
to all of the streams in recipient_ids for the entire time
window.
* Correct the list from the previous step by excluding those with
existing UserMessage rows.
* Correct the list from the previous step by excluding those
where the user wasn't subscribed at the time, using the
RealmAuditLog data to determine exactly when the user was
subscribed/unsubscribed.
* Create the UserMessage rows.
For further documentation, see:
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
"""
assert user_profile.last_active_message_id is not None
all_stream_subs = list(
Subscription.objects.filter(
user_profile=user_profile, recipient__type=Recipient.STREAM
).values("recipient_id", "recipient__type_id")
)
# For stream messages we need to check messages against data from
# RealmAuditLog for visibility to user. So we fetch the subscription logs.
stream_ids = [sub["recipient__type_id"] for sub in all_stream_subs]
# We have a partial index on RealmAuditLog for these rows -- if
# this set changes, the partial index must be updated as well, to
# keep this query performant
events = [
RealmAuditLog.SUBSCRIPTION_CREATED,
RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
RealmAuditLog.SUBSCRIPTION_ACTIVATED,
]
# Important: We order first by event_last_message_id, which is the
# official ordering, and then tiebreak by RealmAuditLog event ID.
# That second tiebreak is important in case a user is subscribed
# and then unsubscribed without any messages being sent in the
# meantime. Without that tiebreak, we could end up incorrectly
# processing the ordering of those two subscription changes. Note
# that this means we cannot backfill events unless there are no
# pre-existing events for this stream/user pair!
subscription_logs = list(
RealmAuditLog.objects.filter(
modified_user=user_profile, modified_stream_id__in=stream_ids, event_type__in=events
)
.order_by("event_last_message_id", "id")
.only("id", "event_type", "modified_stream_id", "event_last_message_id")
)
all_stream_subscription_logs: DefaultDict[int, List[RealmAuditLog]] = defaultdict(list)
for log in subscription_logs:
all_stream_subscription_logs[assert_is_not_none(log.modified_stream_id)].append(log)
recipient_ids = []
for sub in all_stream_subs:
stream_subscription_logs = all_stream_subscription_logs[sub["recipient__type_id"]]
if stream_subscription_logs[-1].event_type == RealmAuditLog.SUBSCRIPTION_DEACTIVATED:
assert stream_subscription_logs[-1].event_last_message_id is not None
if (
stream_subscription_logs[-1].event_last_message_id
<= user_profile.last_active_message_id
):
# We are going to short-circuit this iteration, as it's no use
# iterating, since the user unsubscribed before soft-deactivation.
continue
recipient_ids.append(sub["recipient_id"])
new_stream_msgs = (
Message.objects.annotate(
has_user_message=Exists(
UserMessage.objects.filter(
user_profile_id=user_profile,
message_id=OuterRef("id"),
)
)
)
.filter(
# Uses index: zerver_message_realm_recipient_id
has_user_message=0,
realm_id=user_profile.realm_id,
recipient_id__in=recipient_ids,
id__gt=user_profile.last_active_message_id,
)
.order_by("id")
.values("id", "recipient__type_id")
)
stream_messages: DefaultDict[int, List[MissingMessageDict]] = defaultdict(list)
for msg in new_stream_msgs:
stream_messages[msg["recipient__type_id"]].append(
MissingMessageDict(id=msg["id"], recipient__type_id=msg["recipient__type_id"])
)
# Call filter_by_subscription_history to filter out stream messages based on
# the subscription logs and collect the message IDs for the UserMessage rows
# to bulk-insert. That function does not perform any SQL queries; it gets all
# the data required for its operation via its parameters.
message_ids_to_insert = filter_by_subscription_history(
user_profile, stream_messages, all_stream_subscription_logs
)
# Doing a bulk create for all the UserMessage objects stored for creation.
while len(message_ids_to_insert) > 0:
message_ids, message_ids_to_insert = (
message_ids_to_insert[0:BULK_CREATE_BATCH_SIZE],
message_ids_to_insert[BULK_CREATE_BATCH_SIZE:],
)
bulk_insert_all_ums(user_ids=[user_profile.id], message_ids=message_ids, flags=0)
UserProfile.objects.filter(id=user_profile.id).update(
last_active_message_id=Greatest(F("last_active_message_id"), message_ids[-1])
) |
When we're about to send an email/push notification to a
long_term_idle user, it's very likely that the user will try to
return to Zulip. As a result, it makes sense to optimistically
soft-reactivate that user, to give them a good return experience.
It's important that we do nothing for stream wildcard or large
group mentions (size > 'settings.MAX_GROUP_SIZE_FOR_MENTION_REACTIVATION'),
because soft-reactivating an entire realm or a large group would be
very expensive. The caller is responsible for passing a
mentioned_user_group_members_count that is None for messages that
contain both a personal mention and a group mention. | def soft_reactivate_if_personal_notification(
user_profile: UserProfile,
unique_triggers: Set[str],
mentioned_user_group_members_count: Optional[int],
) -> None:
"""When we're about to send an email/push notification to a
long_term_idle user, it's very likely that the user will try to
return to Zulip. As a result, it makes sense to optimistically
soft-reactivate that user, to give them a good return experience.
It's important that we do nothing for stream wildcard or large
group mentions (size > 'settings.MAX_GROUP_SIZE_FOR_MENTION_REACTIVATION'),
because soft-reactivating an entire realm or a large group would be
very expensive. The caller is responsible for passing a
mentioned_user_group_members_count that is None for messages that
contain both a personal mention and a group mention.
"""
if not user_profile.long_term_idle:
return
direct_message = NotificationTriggers.DIRECT_MESSAGE in unique_triggers
personal_mention = False
small_group_mention = False
if NotificationTriggers.MENTION in unique_triggers:
if mentioned_user_group_members_count is None:
personal_mention = True
elif mentioned_user_group_members_count <= settings.MAX_GROUP_SIZE_FOR_MENTION_REACTIVATION:
small_group_mention = True
topic_wildcard_mention = any(
trigger in unique_triggers
for trigger in [
NotificationTriggers.TOPIC_WILDCARD_MENTION,
NotificationTriggers.TOPIC_WILDCARD_MENTION_IN_FOLLOWED_TOPIC,
]
)
if (
not direct_message
and not personal_mention
and not small_group_mention
and not topic_wildcard_mention
):
return
queue_soft_reactivation(user_profile.id) |
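A hypothetical call from notification-handling code, assuming user_profile is a long_term_idle UserProfile that was just mentioned directly (no group mention), so the group-size argument is None and the user is queued for soft reactivation.

soft_reactivate_if_personal_notification(
    user_profile,
    unique_triggers={NotificationTriggers.MENTION},
    mentioned_user_group_members_count=None,
)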
Note that stream_dict["name"] is assumed to already be stripped of
whitespace | def create_streams_if_needed(
realm: Realm, stream_dicts: List[StreamDict], acting_user: Optional[UserProfile] = None
) -> Tuple[List[Stream], List[Stream]]:
"""Note that stream_dict["name"] is assumed to already be stripped of
whitespace"""
added_streams: List[Stream] = []
existing_streams: List[Stream] = []
for stream_dict in stream_dicts:
invite_only = stream_dict.get("invite_only", False)
stream, created = create_stream_if_needed(
realm,
stream_dict["name"],
invite_only=invite_only,
is_web_public=stream_dict.get("is_web_public", False),
stream_post_policy=stream_dict.get(
"stream_post_policy", Stream.STREAM_POST_POLICY_EVERYONE
),
history_public_to_subscribers=stream_dict.get("history_public_to_subscribers"),
stream_description=stream_dict.get("description", ""),
message_retention_days=stream_dict.get("message_retention_days", None),
can_remove_subscribers_group=stream_dict.get("can_remove_subscribers_group", None),
acting_user=acting_user,
)
if created:
added_streams.append(stream)
else:
existing_streams.append(stream)
return added_streams, existing_streams |
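An illustrative call, assuming a Zulip shell where realm and an acting admin user are available; keys not supplied fall back to the defaults shown above.

from typing import List

stream_dicts: List[StreamDict] = [
    {"name": "engineering", "description": "Engineering discussion"},
    {"name": "staff", "invite_only": True},
]
added_streams, existing_streams = create_streams_if_needed(
    realm, stream_dicts, acting_user=admin_user
)
# added_streams holds newly created Stream objects; existing_streams holds
# streams that already existed and were returned unchanged.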
Common function for backend code where the target user attempts to
access the target stream, returning all the data fetched along the
way. If that user does not have permission to access that stream,
we throw an exception. A design goal is that the error message is
the same for streams you can't access and streams that don't exist. | def access_stream_common(
user_profile: UserProfile,
stream: Stream,
error: str,
require_active: bool = True,
allow_realm_admin: bool = False,
) -> Optional[Subscription]:
"""Common function for backend code where the target use attempts to
access the target stream, returning all the data fetched along the
way. If that user does not have permission to access that stream,
we throw an exception. A design goal is that the error message is
the same for streams you can't access and streams that don't exist."""
# First, we don't allow any access to streams in other realms.
if stream.realm_id != user_profile.realm_id:
# Callers should verify this on their own, so this functions as defensive code.
raise AssertionError("user_profile and stream realms don't match")
try:
assert stream.recipient_id is not None
sub = Subscription.objects.get(
user_profile=user_profile, recipient_id=stream.recipient_id, active=require_active
)
except Subscription.DoesNotExist:
sub = None
if check_basic_stream_access(user_profile, stream, sub, allow_realm_admin=allow_realm_admin):
return sub
# Otherwise it is a private stream and you're not on it, so throw
# an error.
raise JsonableError(error) |
It may seem a little silly to have this helper function for unmuting
topics, but it gets around a linter warning, and it helps to be able
to review all security-related stuff in one place.
Our policy for accessing streams when you unmute a topic is that you
don't necessarily need to have an active subscription or even "legal"
access to the stream. Instead, we just verify the stream_id has been
muted in the past (not here, but in the caller).
Long term, we'll probably have folks just pass us in the id of the
UserTopic row to unmute topics. | def access_stream_to_remove_visibility_policy_by_name(
user_profile: UserProfile, stream_name: str, error: str
) -> Stream:
"""
It may seem a little silly to have this helper function for unmuting
topics, but it gets around a linter warning, and it helps to be able
to review all security-related stuff in one place.
Our policy for accessing streams when you unmute a topic is that you
don't necessarily need to have an active subscription or even "legal"
access to the stream. Instead, we just verify the stream_id has been
muted in the past (not here, but in the caller).
Long term, we'll probably have folks just pass us in the id of the
UserTopic row to unmute topics.
"""
try:
stream = get_stream(stream_name, user_profile.realm)
except Stream.DoesNotExist:
raise JsonableError(error)
return stream |
Determine whether the provided user is allowed to access the
history of the target stream.
This is used by the caller to determine whether this user can get
historical messages before they joined for a narrowing search.
Because of the way our search is currently structured,
we may be passed an invalid stream here. We return
False in that situation, and subsequent code will do
validation and raise the appropriate JsonableError.
Note that this function should only be used in contexts where
access_stream is being called elsewhere to confirm that the user
can actually see this stream. | def can_access_stream_history(user_profile: UserProfile, stream: Stream) -> bool:
"""Determine whether the provided user is allowed to access the
history of the target stream.
This is used by the caller to determine whether this user can get
historical messages before they joined for a narrowing search.
Because of the way our search is currently structured,
we may be passed an invalid stream here. We return
False in that situation, and subsequent code will do
validation and raise the appropriate JsonableError.
Note that this function should only be used in contexts where
access_stream is being called elsewhere to confirm that the user
can actually see this stream.
"""
if user_profile.realm_id != stream.realm_id:
raise AssertionError("user_profile and stream realms don't match")
if stream.is_web_public:
return True
if stream.is_history_realm_public() and not user_profile.is_guest:
return True
if stream.is_history_public_to_subscribers():
# In this case, we check if the user is subscribed.
error = _("Invalid channel name '{channel_name}'").format(channel_name=stream.name)
try:
access_stream_common(user_profile, stream, error)
except JsonableError:
return False
return True
return False |
Converts list of dicts to a list of Streams, validating input in the process
For each stream name, we validate it to ensure it meets our
requirements for a proper stream name using check_stream_name.
This function in autocreate mode should be atomic: either an exception will be raised
during a precheck, or all the streams specified will have been created if applicable.
@param streams_raw The list of stream dictionaries to process;
names should already be stripped of whitespace by the caller.
@param user_profile The user for whom we are retrieving the streams
@param autocreate Whether we should create streams if they don't already exist | def list_to_streams(
streams_raw: Collection[StreamDict],
user_profile: UserProfile,
autocreate: bool = False,
unsubscribing_others: bool = False,
is_default_stream: bool = False,
) -> Tuple[List[Stream], List[Stream]]:
"""Converts list of dicts to a list of Streams, validating input in the process
For each stream name, we validate it to ensure it meets our
requirements for a proper stream name using check_stream_name.
This function in autocreate mode should be atomic: either an exception will be raised
during a precheck, or all the streams specified will have been created if applicable.
@param streams_raw The list of stream dictionaries to process;
names should already be stripped of whitespace by the caller.
@param user_profile The user for whom we are retrieving the streams
@param autocreate Whether we should create streams if they don't already exist
"""
# Validate all streams, getting extant ones, then get-or-creating the rest.
stream_set = {stream_dict["name"] for stream_dict in streams_raw}
for stream_name in stream_set:
# Stream names should already have been stripped by the
# caller, but it makes sense to verify anyway.
assert stream_name == stream_name.strip()
check_stream_name(stream_name)
existing_streams: List[Stream] = []
missing_stream_dicts: List[StreamDict] = []
existing_stream_map = bulk_get_streams(user_profile.realm, stream_set)
if unsubscribing_others:
existing_recipient_ids = [stream.recipient_id for stream in existing_stream_map.values()]
subs = Subscription.objects.filter(
user_profile=user_profile, recipient_id__in=existing_recipient_ids, active=True
)
sub_map = {sub.recipient_id: sub for sub in subs}
for stream in existing_stream_map.values():
sub = sub_map.get(stream.recipient_id, None)
if not can_remove_subscribers_from_stream(stream, user_profile, sub):
raise JsonableError(_("Insufficient permission"))
message_retention_days_not_none = False
web_public_stream_requested = False
for stream_dict in streams_raw:
stream_name = stream_dict["name"]
stream = existing_stream_map.get(stream_name.lower())
if stream is None:
if stream_dict.get("message_retention_days", None) is not None:
message_retention_days_not_none = True
missing_stream_dicts.append(stream_dict)
if autocreate and stream_dict["is_web_public"]:
web_public_stream_requested = True
else:
existing_streams.append(stream)
if len(missing_stream_dicts) == 0:
# This is the happy path for callers who expected all of these
# streams to exist already.
created_streams: List[Stream] = []
else:
# autocreate=True path starts here
for stream_dict in missing_stream_dicts:
invite_only = stream_dict.get("invite_only", False)
if invite_only and not user_profile.can_create_private_streams():
raise JsonableError(_("Insufficient permission"))
if not invite_only and not user_profile.can_create_public_streams():
raise JsonableError(_("Insufficient permission"))
if is_default_stream and not user_profile.is_realm_admin:
raise JsonableError(_("Insufficient permission"))
if invite_only and is_default_stream:
raise JsonableError(_("A default channel cannot be private."))
if not autocreate:
raise JsonableError(
_("Channel(s) ({channel_names}) do not exist").format(
channel_names=", ".join(
stream_dict["name"] for stream_dict in missing_stream_dicts
),
)
)
if web_public_stream_requested:
if not user_profile.realm.web_public_streams_enabled():
raise JsonableError(_("Web-public channels are not enabled."))
if not user_profile.can_create_web_public_streams():
# We set create_web_public_stream_policy to allow only organization owners
# to create web-public streams, because of their sensitive nature.
raise JsonableError(_("Insufficient permission"))
if message_retention_days_not_none:
if not user_profile.is_realm_owner:
raise OrganizationOwnerRequiredError
user_profile.realm.ensure_not_on_limited_plan()
# We already filtered out existing streams, so dup_streams
# will normally be an empty list below, but we protect against somebody
# else racing to create the same stream. (This is not an entirely
# paranoid approach, since often on Zulip two people will discuss
# creating a new stream, and both people eagerly do it.)
created_streams, dup_streams = create_streams_if_needed(
realm=user_profile.realm, stream_dicts=missing_stream_dicts, acting_user=user_profile
)
existing_streams += dup_streams
return existing_streams, created_streams |
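An illustrative call from an endpoint subscribing the requesting user, with autocreate enabled; user_profile is hypothetical, and is_web_public is set explicitly because the autocreate path reads it directly.

existing_streams, created_streams = list_to_streams(
    [
        {"name": "design", "is_web_public": False},
        {"name": "design-private", "invite_only": True, "is_web_public": False},
    ],
    user_profile,
    autocreate=True,
)
# Raises JsonableError("Insufficient permission") instead if user_profile
# is not allowed to create streams of the requested kind.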