/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: sw=2 ts=8 et :
 */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <map>

#include <mach/vm_map.h>
#include <mach/mach_port.h>
#if defined(XP_IOS)
#  include <mach/vm_map.h>
#  define mach_vm_address_t vm_address_t
#  define mach_vm_map vm_map
#  define mach_vm_read vm_read
#  define mach_vm_region_recurse vm_region_recurse_64
#  define mach_vm_size_t vm_size_t
#else
#  include <mach/mach_vm.h>
#endif
#include <pthread.h>
#include <unistd.h>

#include "SharedMemoryBasic.h"

#include "mozilla/IntegerPrintfMacros.h"
#include "mozilla/Printf.h"
#include "mozilla/StaticMutex.h"

#include "mozilla/layers/TextureSync.h"

#ifdef DEBUG
#  define LOG_ERROR(str, args...)                                  \
    PR_BEGIN_MACRO                                                 \
    mozilla::SmprintfPointer msg = mozilla::Smprintf(str, ##args); \
    NS_WARNING(msg.get());                                         \
    PR_END_MACRO
#else
#  define LOG_ERROR(str, args...) \
    do { /* nothing */            \
    } while (0)
#endif

#define CHECK_MACH_ERROR(kr, msg)                              \
  PR_BEGIN_MACRO                                               \
  if (kr != KERN_SUCCESS) {                                    \
    LOG_ERROR("%s %s (%x)\n", msg, mach_error_string(kr), kr); \
    return false;                                              \
  }                                                            \
  PR_END_MACRO

/*
 * This code is responsible for sharing memory between processes. Memory can be
 * shared between parent and child or between two children. Each memory region
 * is referenced via a Mach port. Mach ports are also used for messaging when
 * sharing a memory region.
 *
 * When the parent starts a child, it starts a thread whose only purpose is to
 * communicate with the child about shared memory. Once the child has started,
 * it starts a similar thread for communicating with the parent. Each side can
 * communicate with the thread on the other side via Mach ports. When either
 * side wants to share memory with the other, it sends a Mach message to the
 * other side. Attached to the message is the port that references the shared
 * memory region. When the other side receives the message, it automatically
 * gets access to the region. It sends a reply (also via a Mach port) so that
 * the originating side can continue.
 *
 * The two sides communicate using four ports. Two ports are used when the
 * parent shares memory with the child. The other two are used when the child
 * shares memory with the parent. One of these two ports is used for sending
 * the "share" message and the other is used for the reply.
 *
 * If a child wants to share memory with another child, it sends a "GetPorts"
 * message to the parent. The parent forwards this GetPorts message to the
 * target child. The message includes some ports so that the children can talk
 * directly. Both children start up a thread to communicate with the other
 * child, similar to the way parent and child communicate. In the future, when
 * these two children want to communicate, they re-use the channels that were
 * created.
 *
 * When a child shuts down, the parent notifies all other children. Those
 * children then have the opportunity to shut down any threads they might have
 * been using to communicate directly with that child.
 */
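
/*
 * Illustrative message flow (a sketch assembled from the handlers below, not
 * an additional spec). Sharing a region from process A to process B:
 *
 *   A: ShareToProcess(pidB)
 *        -- kSharePortsMsg [serial + port for the region] --> B
 *   B: PortServerThread() -> HandleSharePortsMessage()
 *        <-- kReturnIdMsg [SharePortsReply: serial + B-side port] --
 *
 * Establishing child-to-child channels goes through the parent:
 *
 *   A --> parent: kGetPortsMsg [PIDPair + rights to two of A's ports]
 *   parent --> B: ForwardGetPortsMessage()
 *   B --> parent --> A: kReturnPortsMsg [rights to two of B's ports]
 */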

namespace mozilla {
namespace ipc {

// Protects gMemoryCommPorts and gThreads.
static StaticMutex gMutex;

static std::map<pid_t, MemoryPorts> gMemoryCommPorts;

const int kTimeout = 1000;
const int kLongTimeout = 60 * kTimeout;

pid_t gParentPid = 0;

struct PIDPair {
  pid_t mRequester;
  pid_t mRequested;

  PIDPair(pid_t requester, pid_t requested)
      : mRequester(requester), mRequested(requested) {}
};

struct ListeningThread {
  pthread_t mThread;
  MemoryPorts* mPorts;

  ListeningThread() = default;
  ListeningThread(pthread_t thread, MemoryPorts* ports)
      : mThread(thread), mPorts(ports) {}
};

struct SharePortsReply {
  uint64_t serial;
  mach_port_t port;
};
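
// SharePortsReply is the wire format of the kReturnIdMsg reply:
// HandleSharePortsMessage() fills it in, and ShareToProcess() checks that the
// echoed serial matches its request before trusting the returned port.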

std::map<pid_t, ListeningThread> gThreads;

static void* PortServerThread(void* argument);

static void SetupMachMemory(pid_t pid, ReceivePort* listen_port,
                            MachPortSender* listen_port_ack,
                            MachPortSender* send_port,
                            ReceivePort* send_port_ack, bool pidIsParent) {
  if (pidIsParent) {
    gParentPid = pid;
  }
  auto* listen_ports = new MemoryPorts(listen_port_ack, listen_port);
  pthread_t thread;
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  // The server thread is created detached; it owns |listen_ports| and frees
  // them when it receives kShutdownMsg (see PortServerThread()).
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  int err = pthread_create(&thread, &attr, PortServerThread, listen_ports);
  if (err) {
    LOG_ERROR("pthread_create failed with %x\n", err);
    return;
  }

  gMutex.AssertCurrentThreadOwns();
  gThreads[pid] = ListeningThread(thread, listen_ports);
  gMemoryCommPorts[pid] = MemoryPorts(send_port, send_port_ack);
}

// Send two communication ports to another process along with the pid of the
// process that is listening on them.
bool SendPortsMessage(MachPortSender* sender, mach_port_t ports_in_receiver,
                      mach_port_t ports_out_receiver, PIDPair pid_pair) {
  MachSendMessage getPortsMsg(kGetPortsMsg);
  if (!getPortsMsg.AddDescriptor(MachMsgPortDescriptor(ports_in_receiver))) {
    LOG_ERROR("Adding descriptor to message failed");
    return false;
  }
  if (!getPortsMsg.AddDescriptor(MachMsgPortDescriptor(ports_out_receiver))) {
    LOG_ERROR("Adding descriptor to message failed");
    return false;
  }

  getPortsMsg.SetData(&pid_pair, sizeof(PIDPair));
  kern_return_t err = sender->SendMessage(getPortsMsg, kTimeout);
  if (KERN_SUCCESS != err) {
    LOG_ERROR("Error sending get ports message %s (%x)\n",
              mach_error_string(err), err);
    return false;
  }
  return true;
}

// Receive two communication ports from another process.
bool RecvPortsMessage(ReceivePort* receiver, mach_port_t* ports_in_sender,
                      mach_port_t* ports_out_sender) {
  MachReceiveMessage rcvPortsMsg;
  kern_return_t err = receiver->WaitForMessage(&rcvPortsMsg, kTimeout);
  if (KERN_SUCCESS != err) {
    LOG_ERROR("Error receiving get ports message %s (%x)\n",
              mach_error_string(err), err);
    return false;
  }
  if (rcvPortsMsg.GetTranslatedPort(0) == MACH_PORT_NULL) {
    LOG_ERROR("GetTranslatedPort(0) failed");
    return false;
  }
  *ports_in_sender = rcvPortsMsg.GetTranslatedPort(0);

  if (rcvPortsMsg.GetTranslatedPort(1) == MACH_PORT_NULL) {
    LOG_ERROR("GetTranslatedPort(1) failed");
    return false;
  }
  *ports_out_sender = rcvPortsMsg.GetTranslatedPort(1);
  return true;
}

// Send two communication ports to another process and receive two back.
bool RequestPorts(const MemoryPorts& request_ports,
                  mach_port_t ports_in_receiver, mach_port_t* ports_in_sender,
                  mach_port_t* ports_out_sender, mach_port_t ports_out_receiver,
                  PIDPair pid_pair) {
  if (!SendPortsMessage(request_ports.mSender, ports_in_receiver,
                        ports_out_receiver, pid_pair)) {
    return false;
  }
  return RecvPortsMessage(request_ports.mReceiver, ports_in_sender,
                          ports_out_sender);
}

MemoryPorts* GetMemoryPortsForPid(pid_t pid) {
  gMutex.AssertCurrentThreadOwns();

  if (gMemoryCommPorts.find(pid) == gMemoryCommPorts.end()) {
    // We don't have the ports open to communicate with that pid, so we're
    // going to ask our parent process over IPC to set them up for us.
    if (gParentPid == 0) {
      // If we're the top level parent process, we have no parent to ask.
      LOG_ERROR("request for ports for pid %d, but we're the chrome process\n",
                pid);
      return nullptr;
    }
    const MemoryPorts& parent = gMemoryCommPorts[gParentPid];

    // Create two receiving ports in this process to send to the parent. One
    // will be used for listening for incoming memory to be shared, the other
    // for getting the Handle of memory we share to the other process.
    auto* ports_in_receiver = new ReceivePort();
    auto* ports_out_receiver = new ReceivePort();
    mach_port_t raw_ports_in_sender, raw_ports_out_sender;
    if (!RequestPorts(parent, ports_in_receiver->GetPort(),
                      &raw_ports_in_sender, &raw_ports_out_sender,
                      ports_out_receiver->GetPort(), PIDPair(getpid(), pid))) {
      LOG_ERROR("failed to request ports\n");
      return nullptr;
    }
    // Our parent process sent us two ports, one is for sending new memory to,
    // the other is for replying with the Handle when we receive new memory.
    auto* ports_in_sender = new MachPortSender(raw_ports_in_sender);
    auto* ports_out_sender = new MachPortSender(raw_ports_out_sender);
    SetupMachMemory(pid, ports_in_receiver, ports_in_sender, ports_out_sender,
                    ports_out_receiver, false);
    MOZ_ASSERT(gMemoryCommPorts.find(pid) != gMemoryCommPorts.end());
  }
  return &gMemoryCommPorts.at(pid);
}

// We just received a port representing a region of shared memory, reply to
// the process that sent it with the mach_port_t that represents it in this
// process. That will be the Handle to be shared over normal IPC.
//
// WARNING: this function is called while gMutex is not held and must not
// reference structures protected by gMutex. See the deadlock warning in
// ShareToProcess().
void HandleSharePortsMessage(MachReceiveMessage* rmsg, MemoryPorts* ports) {
  mach_port_t port = rmsg->GetTranslatedPort(0);
  uint64_t* serial = reinterpret_cast<uint64_t*>(rmsg->GetData());
  MachSendMessage msg(kReturnIdMsg);
  // Construct the reply message, echoing the serial, and adding the port.
  SharePortsReply replydata;
  replydata.port = port;
  replydata.serial = *serial;
  msg.SetData(&replydata, sizeof(SharePortsReply));
  kern_return_t err = ports->mSender->SendMessage(msg, kTimeout);
  if (KERN_SUCCESS != err) {
    LOG_ERROR("SendMessage failed 0x%x %s\n", err, mach_error_string(err));
  }
}

// We were asked by another process to get communications ports to some
// process. Return those ports via an IPC message.
bool SendReturnPortsMsg(MachPortSender* sender, mach_port_t raw_ports_in_sender,
                        mach_port_t raw_ports_out_sender) {
  MachSendMessage getPortsMsg(kReturnPortsMsg);
  if (!getPortsMsg.AddDescriptor(MachMsgPortDescriptor(raw_ports_in_sender))) {
    LOG_ERROR("Adding descriptor to message failed");
    return false;
  }

  if (!getPortsMsg.AddDescriptor(MachMsgPortDescriptor(raw_ports_out_sender))) {
    LOG_ERROR("Adding descriptor to message failed");
    return false;
  }
  kern_return_t err = sender->SendMessage(getPortsMsg, kTimeout);
  if (KERN_SUCCESS != err) {
    LOG_ERROR("Error sending return ports message %s (%x)\n",
              mach_error_string(err), err);
    return false;
  }
  return true;
}

// We were asked for communications ports to a process that isn't us. Assuming
// that process is one of our children, forward that request on.
void ForwardGetPortsMessage(MachReceiveMessage* rmsg, MemoryPorts* ports,
                            PIDPair* pid_pair) {
  if (rmsg->GetTranslatedPort(0) == MACH_PORT_NULL) {
    LOG_ERROR("GetTranslatedPort(0) failed");
    return;
  }
  if (rmsg->GetTranslatedPort(1) == MACH_PORT_NULL) {
    LOG_ERROR("GetTranslatedPort(1) failed");
    return;
  }
  mach_port_t raw_ports_in_sender, raw_ports_out_sender;
  MemoryPorts* requestedPorts = GetMemoryPortsForPid(pid_pair->mRequested);
  if (!requestedPorts) {
    LOG_ERROR("failed to find port for process\n");
    return;
  }
  if (!RequestPorts(*requestedPorts, rmsg->GetTranslatedPort(0),
                    &raw_ports_in_sender, &raw_ports_out_sender,
                    rmsg->GetTranslatedPort(1), *pid_pair)) {
    LOG_ERROR("failed to request ports\n");
    return;
  }
  SendReturnPortsMsg(ports->mSender, raw_ports_in_sender, raw_ports_out_sender);
}

// We received a message asking us to get communications ports for another
// process.
void HandleGetPortsMessage(MachReceiveMessage* rmsg, MemoryPorts* ports) {
  PIDPair* pid_pair;
  if (rmsg->GetDataLength() != sizeof(PIDPair)) {
    LOG_ERROR("Improperly formatted message\n");
    return;
  }
  pid_pair = reinterpret_cast<PIDPair*>(rmsg->GetData());
  if (pid_pair->mRequested != getpid()) {
    // This request is for ports to a process that isn't us, forward it to
    // that process.
    ForwardGetPortsMessage(rmsg, ports, pid_pair);
  } else {
    if (rmsg->GetTranslatedPort(0) == MACH_PORT_NULL) {
      LOG_ERROR("GetTranslatedPort(0) failed");
      return;
    }

    if (rmsg->GetTranslatedPort(1) == MACH_PORT_NULL) {
      LOG_ERROR("GetTranslatedPort(1) failed");
      return;
    }

    auto* ports_in_sender = new MachPortSender(rmsg->GetTranslatedPort(0));
    auto* ports_out_sender = new MachPortSender(rmsg->GetTranslatedPort(1));

    auto* ports_in_receiver = new ReceivePort();
    auto* ports_out_receiver = new ReceivePort();
    if (SendReturnPortsMsg(ports->mSender, ports_in_receiver->GetPort(),
                           ports_out_receiver->GetPort())) {
      SetupMachMemory(pid_pair->mRequester, ports_out_receiver,
                      ports_out_sender, ports_in_sender, ports_in_receiver,
                      false);
    }
  }
}

static void* PortServerThread(void* argument) {
  pthread_setname_np("PortServerThread");
  MemoryPorts* ports = static_cast<MemoryPorts*>(argument);
  while (true) {
    MachReceiveMessage rmsg;
    kern_return_t err =
        ports->mReceiver->WaitForMessage(&rmsg, MACH_MSG_TIMEOUT_NONE);
    if (err != KERN_SUCCESS) {
      LOG_ERROR("Wait for message failed 0x%x %s\n", err,
                mach_error_string(err));
      continue;
    }
    if (rmsg.GetMessageID() == kShutdownMsg) {
      delete ports->mSender;
      delete ports->mReceiver;
      delete ports;
      return nullptr;
    }
    if (rmsg.GetMessageID() == kWaitForTexturesMsg) {
      layers::TextureSync::HandleWaitForTexturesMessage(&rmsg, ports);
    } else if (rmsg.GetMessageID() == kUpdateTextureLocksMsg) {
      layers::TextureSync::DispatchCheckTexturesForUnlock();
    } else {
      switch (rmsg.GetMessageID()) {
        case kSharePortsMsg: {
          // Don't acquire gMutex here while calling HandleSharePortsMessage()
          // to avoid deadlock. If gMutex is held by ShareToProcess(), we will
          // block and create the following deadlock chain:
          //
          // 1) local:PortServerThread() blocked on local:gMutex held by
          // 2) local:ShareToProcess() waiting for reply from
          // 3) peer:PortServerThread() blocked on peer:gMutex held by
          // 4) peer:ShareToProcess() waiting for reply from 1.
          //
          // It's safe to call HandleSharePortsMessage() without gMutex
          // because HandleSharePortsMessage() only sends an outgoing message
          // without referencing data structures protected by gMutex. The
          // |ports| struct is deallocated on this thread in the kShutdownMsg
          // message handling before this thread exits.
          HandleSharePortsMessage(&rmsg, ports);
          break;
        }
        case kGetPortsMsg: {
          StaticMutexAutoLock smal(gMutex);
          HandleGetPortsMessage(&rmsg, ports);
          break;
        }
        case kCleanupMsg: {
          StaticMutexAutoLock smal(gMutex);
          if (gParentPid == 0) {
            LOG_ERROR("Cleanup message not valid for parent process");
            continue;
          }

          pid_t* pid;
          if (rmsg.GetDataLength() != sizeof(pid_t)) {
            LOG_ERROR("Improperly formatted message\n");
            continue;
          }
          pid = reinterpret_cast<pid_t*>(rmsg.GetData());
          SharedMemoryBasic::CleanupForPid(*pid);
          break;
        }
        default: {
          // gMutex not required.
          LOG_ERROR("Unknown message\n");
        }
      }
    }
  }
}

void SharedMemoryBasic::SetupMachMemory(pid_t pid, ReceivePort* listen_port,
                                        MachPortSender* listen_port_ack,
                                        MachPortSender* send_port,
                                        ReceivePort* send_port_ack,
                                        bool pidIsParent) {
  StaticMutexAutoLock smal(gMutex);
  mozilla::ipc::SetupMachMemory(pid, listen_port, listen_port_ack, send_port,
                                send_port_ack, pidIsParent);
}

void SharedMemoryBasic::Shutdown() {
  StaticMutexAutoLock smal(gMutex);

  layers::TextureSync::Shutdown();

  for (auto& thread : gThreads) {
    MachSendMessage shutdownMsg(kShutdownMsg);
    thread.second.mPorts->mReceiver->SendMessageToSelf(shutdownMsg, kTimeout);
  }
  gThreads.clear();

  for (auto& memoryCommPort : gMemoryCommPorts) {
    delete memoryCommPort.second.mSender;
    delete memoryCommPort.second.mReceiver;
  }
  gMemoryCommPorts.clear();
}

void SharedMemoryBasic::CleanupForPidWithLock(pid_t pid) {
  StaticMutexAutoLock smal(gMutex);
  CleanupForPid(pid);
}

void SharedMemoryBasic::CleanupForPid(pid_t pid) {
  gMutex.AssertCurrentThreadOwns();

  if (gThreads.find(pid) == gThreads.end()) {
    return;
  }

  layers::TextureSync::CleanupForPid(pid);

  const ListeningThread& listeningThread = gThreads[pid];
  MachSendMessage shutdownMsg(kShutdownMsg);
  kern_return_t ret =
      listeningThread.mPorts->mReceiver->SendMessageToSelf(shutdownMsg,
                                                           kTimeout);
  if (ret != KERN_SUCCESS) {
    LOG_ERROR("sending shutdown msg failed %s %x\n", mach_error_string(ret),
              ret);
  }
  gThreads.erase(pid);

  if (gParentPid == 0) {
    // We're the parent. Broadcast the cleanup message to everyone else.
    for (auto& memoryCommPort : gMemoryCommPorts) {
      MachSendMessage msg(kCleanupMsg);
      msg.SetData(&pid, sizeof(pid));
      // We don't really care if this fails; we could be trying to send to a
      // process that has already shut down.
      memoryCommPort.second.mSender->SendMessage(msg, kTimeout);
    }
  }

  MemoryPorts& ports = gMemoryCommPorts[pid];
  delete ports.mSender;
  delete ports.mReceiver;
  gMemoryCommPorts.erase(pid);
}
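
// Generic helper for sending a Mach message to another process's
// PortServerThread(), optionally waiting for a reply. As used in this file,
// it presumably carries the texture-lock traffic (kWaitForTexturesMsg,
// kUpdateTextureLocksMsg) handled in the server loop above.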
bool SharedMemoryBasic::SendMachMessage(pid_t pid, MachSendMessage& message,
                                        MachReceiveMessage* response) {
  StaticMutexAutoLock smal(gMutex);
  ipc::MemoryPorts* ports = GetMemoryPortsForPid(pid);
  if (!ports) {
    LOG_ERROR("Unable to get ports for process.\n");
    return false;
  }

  kern_return_t err = ports->mSender->SendMessage(message, kTimeout);
  if (err != KERN_SUCCESS) {
    LOG_ERROR("Failed to send Mach message.\n");
    return false;
  }

  if (response) {
    err = ports->mReceiver->WaitForMessage(response, kTimeout);
    if (err != KERN_SUCCESS) {
      LOG_ERROR("short timeout didn't get a response %s %x\n",
                mach_error_string(err), err);
      err = ports->mReceiver->WaitForMessage(response, kLongTimeout);

      if (err != KERN_SUCCESS) {
        LOG_ERROR("long timeout didn't get a response %s %x\n",
                  mach_error_string(err), err);
        return false;
      }
    }
  }

  return true;
}

SharedMemoryBasic::SharedMemoryBasic()
    : mPort(MACH_PORT_NULL), mMemory(nullptr), mOpenRights(RightsReadWrite) {}

SharedMemoryBasic::~SharedMemoryBasic() {
  Unmap();
  CloseHandle();
}

bool SharedMemoryBasic::SetHandle(const Handle& aHandle, OpenRights aRights) {
  MOZ_ASSERT(mPort == MACH_PORT_NULL, "already initialized");

  mPort = aHandle;
  mOpenRights = aRights;
  return true;
}
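
// The Mach VM APIs below use mach_vm_address_t, which can be wider than a
// pointer, so these helpers round-trip through uintptr_t to convert to and
// from void* without truncation or sign-extension surprises.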
static inline void* toPointer(mach_vm_address_t address) {
  return reinterpret_cast<void*>(static_cast<uintptr_t>(address));
}

static inline mach_vm_address_t toVMAddress(void* pointer) {
  return static_cast<mach_vm_address_t>(reinterpret_cast<uintptr_t>(pointer));
}

bool SharedMemoryBasic::Create(size_t size) {
  MOZ_ASSERT(mPort == MACH_PORT_NULL, "already initialized");

  memory_object_size_t memoryObjectSize = round_page(size);

  kern_return_t kr =
      mach_make_memory_entry_64(mach_task_self(), &memoryObjectSize, 0,
                                MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT, &mPort,
                                MACH_PORT_NULL);
  if (kr != KERN_SUCCESS || memoryObjectSize < round_page(size)) {
    LOG_ERROR("Failed to make memory entry (%zu bytes). %s (%x)\n", size,
              mach_error_string(kr), kr);
    CloseHandle();
    return false;
  }

  Mapped(size);
  return true;
}
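
/*
 * Typical cross-process flow (a minimal sketch based on the methods in this
 * file; |pid|, the IPC transport carrying the Handle, and the memory()
 * accessor assumed from the SharedMemory base class are not shown here):
 *
 *   // Sharing side:
 *   SharedMemoryBasic shmem;
 *   if (shmem.Create(size) && shmem.Map(size, nullptr)) {
 *     SharedMemoryBasic::Handle handle;
 *     if (shmem.ShareToProcess(pid, &handle)) {
 *       // ... send |handle| to the peer over regular IPC ...
 *     }
 *   }
 *
 *   // Receiving side, once the Handle arrives:
 *   SharedMemoryBasic shmem;
 *   if (shmem.SetHandle(handle, RightsReadWrite) &&
 *       shmem.Map(size, nullptr)) {
 *     // shmem.memory() now aliases the region shared by the peer.
 *   }
 */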

bool SharedMemoryBasic::Map(size_t size, void* fixed_address) {
  MOZ_ASSERT(mMemory == nullptr);

  if (MACH_PORT_NULL == mPort) {
    return false;
  }

  kern_return_t kr;
  mach_vm_address_t address = toVMAddress(fixed_address);

  vm_prot_t vmProtection = VM_PROT_READ;
  if (mOpenRights == RightsReadWrite) {
    vmProtection |= VM_PROT_WRITE;
  }

  kr = mach_vm_map(mach_task_self(), &address, round_page(size), 0,
                   fixed_address ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE, mPort, 0,
                   false, vmProtection, vmProtection, VM_INHERIT_NONE);
  if (kr != KERN_SUCCESS) {
    if (!fixed_address) {
      LOG_ERROR("Failed to map shared memory (%zu bytes) into %x, port %x. "
                "%s (%x)\n",
                size, mach_task_self(), mPort, mach_error_string(kr), kr);
    }
    return false;
  }

  // If the kernel could not honor the requested fixed address, undo the
  // mapping and report failure rather than using an unexpected address.
  if (fixed_address && fixed_address != toPointer(address)) {
    kr = vm_deallocate(mach_task_self(), address, size);
    if (kr != KERN_SUCCESS) {
      LOG_ERROR("Failed to unmap shared memory at unsuitable address "
                "(%zu bytes) from %x, port %x. %s (%x)\n",
                size, mach_task_self(), mPort, mach_error_string(kr), kr);
    }
    return false;
  }

  mMemory = toPointer(address);
  Mapped(size);
  return true;
}
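
// Probes for a free stretch of address space by reserving |size| bytes with a
// VM_PROT_NONE placeholder mapping and immediately releasing it. The returned
// address is only a hint: another allocation may claim it before the caller
// maps there, which is one reason Map() above fails quietly for a
// fixed_address instead of asserting.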
void* SharedMemoryBasic::FindFreeAddressSpace(size_t size) {
  mach_vm_address_t address = 0;
  size = round_page(size);
  if (mach_vm_map(mach_task_self(), &address, size, 0, VM_FLAGS_ANYWHERE,
                  MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE,
                  VM_INHERIT_NONE) != KERN_SUCCESS ||
      vm_deallocate(mach_task_self(), address, size) != KERN_SUCCESS) {
    return nullptr;
  }
  return toPointer(address);
}

bool SharedMemoryBasic::ShareToProcess(base::ProcessId pid,
                                       Handle* aNewHandle) {
  if (pid == getpid()) {
    *aNewHandle = mPort;
    return mach_port_mod_refs(mach_task_self(), *aNewHandle,
                              MACH_PORT_RIGHT_SEND, 1) == KERN_SUCCESS;
  }
  StaticMutexAutoLock smal(gMutex);

  // Serially number the messages, to check whether the reply we get was meant
  // for us.
  static uint64_t serial = 0;
  uint64_t my_serial = serial;
  serial++;

  MemoryPorts* ports = GetMemoryPortsForPid(pid);
  if (!ports) {
    LOG_ERROR("Unable to get ports for process.\n");
    return false;
  }
  MachSendMessage smsg(kSharePortsMsg);
  smsg.AddDescriptor(MachMsgPortDescriptor(mPort, MACH_MSG_TYPE_COPY_SEND));
  smsg.SetData(&my_serial, sizeof(uint64_t));
  kern_return_t err = ports->mSender->SendMessage(smsg, kTimeout);
  if (err != KERN_SUCCESS) {
    LOG_ERROR("sending port failed %s %x\n", mach_error_string(err), err);
    return false;
  }
  MachReceiveMessage msg;
  err = ports->mReceiver->WaitForMessage(&msg, kTimeout);
  if (err != KERN_SUCCESS) {
    LOG_ERROR("short timeout didn't get an id %s %x\n", mach_error_string(err),
              err);
    err = ports->mReceiver->WaitForMessage(&msg, kLongTimeout);

    if (err != KERN_SUCCESS) {
      LOG_ERROR("long timeout didn't get an id %s %x\n",
                mach_error_string(err), err);
      return false;
    }
  }
  if (msg.GetDataLength() != sizeof(SharePortsReply)) {
    LOG_ERROR("Improperly formatted reply\n");
    return false;
  }
  SharePortsReply* msg_data = reinterpret_cast<SharePortsReply*>(msg.GetData());
  mach_port_t id = msg_data->port;
  uint64_t serial_check = msg_data->serial;
  if (serial_check != my_serial) {
    LOG_ERROR("Serials do not match up: %" PRIu64 " vs %" PRIu64 "",
              serial_check, my_serial);
    return false;
  }
  *aNewHandle = id;
  return true;
}

void SharedMemoryBasic::Unmap() {
  if (!mMemory) {
    return;
  }
  vm_address_t address = toVMAddress(mMemory);
  kern_return_t kr =
      vm_deallocate(mach_task_self(), address, round_page(mMappedSize));
  if (kr != KERN_SUCCESS) {
    LOG_ERROR("Failed to deallocate shared memory. %s (%x)\n",
              mach_error_string(kr), kr);
    return;
  }
  mMemory = nullptr;
}

void SharedMemoryBasic::CloseHandle() {
  if (mPort != MACH_PORT_NULL) {
    mach_port_deallocate(mach_task_self(), mPort);
    mPort = MACH_PORT_NULL;
    mOpenRights = RightsReadWrite;
  }
}

bool SharedMemoryBasic::IsHandleValid(const Handle& aHandle) const {
  return aHandle > 0;
}

}  // namespace ipc
}  // namespace mozilla