[nspluginwrapper/f15/master] Race condition patch (#680279)
Martin Stransky
stransky at fedoraproject.org
Wed Mar 9 10:04:37 UTC 2011
commit 685840b4d524e7c1e40ef574aecb211009432d1d
Author: Martin Stransky <stransky at redhat.com>
Date: Wed Mar 9 11:04:31 2011 +0100
Race condition patch (#680279)
nspluginwrapper-1.3.0-racecond.patch | 219 ++++++++++++++++++++++++++++++++++
nspluginwrapper.spec | 7 +-
2 files changed, 225 insertions(+), 1 deletions(-)
---
diff --git a/nspluginwrapper-1.3.0-racecond.patch b/nspluginwrapper-1.3.0-racecond.patch
new file mode 100644
index 0000000..f538ce2
--- /dev/null
+++ b/nspluginwrapper-1.3.0-racecond.patch
@@ -0,0 +1,219 @@
+--- nspluginwrapper-1.3.0/src/rpc.h
++++ nspluginwrapper-1.3.0/src/rpc.h
+@@ -119,6 +119,7 @@ extern int rpc_method_invoke(rpc_connection_t *connection, int method, ...) attr
+ extern int rpc_method_wait_for_reply(rpc_connection_t *connection, ...) attribute_hidden;
+ extern int rpc_method_get_args(rpc_connection_t *connection, ...) attribute_hidden;
+ extern int rpc_method_send_reply(rpc_connection_t *connection, ...) attribute_hidden;
++extern bool rpc_method_in_invoke(rpc_connection_t *connection) attribute_hidden;
+
+ #ifdef __cplusplus
+ }
+
+diff -up nspluginwrapper-1.3.0/src/rpc.c nspluginwrapper-1.3.0/src/rpc.c
+--- nspluginwrapper-1.3.0/src/rpc.c 2009-01-02 15:22:29.000000000 +0100
++++ nspluginwrapper-1.3.0/src/rpc.c 2011-03-08 13:45:09.321980900 +0100
+@@ -2097,6 +2097,22 @@
+ return ret;
+ }
+
++bool rpc_method_in_invoke(rpc_connection_t *connection)
++{
++ D(bug("rpc_method_in_invoke\n"));
++ if (connection == NULL)
++ return false;
++ // Our stack should alternate between handle/dispatch and
++ // invokes. Some calls are only safe to handle called from an event
++ // loop. In this case, we should have values invoke_depth = 0;
++ // handle_depth = 1; dispatch_depth = 1
++ D(bug("invoke_depth = %d; dispatch_depth = %d; handle_depth = %d\n",
++ connection->invoke_depth,
++ connection->dispatch_depth,
++ connection->handle_depth));
++ return connection->invoke_depth > 0;
++}
++
+
+ /* ====================================================================== */
+ /* === Test Program === */
+
+--- nspluginwrapper-1.3.0/src/npw-viewer.c 2011-03-08 14:46:18.121980900 +0100
++++ nspluginwrapper-1.3.0/src/npw-viewer.c 2011-03-08 14:47:01.638980902 +0100
+@@ -221,8 +221,17 @@
+ static GList *g_delayed_calls = NULL;
+ static guint g_delayed_calls_id = 0;
+
++// We put delayed NPP_Destroy calls on a separate list because, unlike
++// NPN_ReleaseObject, these must be called on a clean stack and have no
++// other cause to get cleared. Otherwise, it is possible for the
++// delayed_calls_process in g_NPP_Destroy_Now to call it early.
++static GList *g_delayed_destroys = NULL;
++static guint g_delayed_destroys_id = 0;
++
+ static void g_NPN_ReleaseObject_Now(NPObject *npobj);
++static NPError g_NPP_Destroy_Now(PluginInstance *plugin, NPSavedData **sdata);
+ static gboolean delayed_calls_process_cb(gpointer user_data);
++static gboolean delayed_destroys_process_cb(gpointer user_data);
+
+ static void delayed_calls_add(int type, gpointer data)
+ {
+@@ -238,6 +247,15 @@
+ delayed_calls_process_cb, NULL, NULL);
+ }
+
++static void delayed_destroys_add(PluginInstance *plugin)
++{
++ g_delayed_destroys = g_list_append(g_delayed_destroys, plugin);
++
++ if (g_delayed_destroys_id == 0)
++ g_delayed_destroys_id = g_idle_add_full(G_PRIORITY_LOW,
++ delayed_destroys_process_cb, NULL, NULL);
++}
++
+ // Returns whether there are pending calls left in the queue
+ static gboolean delayed_calls_process(PluginInstance *plugin, gboolean is_in_NPP_Destroy)
+ {
+@@ -280,6 +298,25 @@
+ return delayed_calls_process(NULL, FALSE);
+ }
+
++static gboolean delayed_destroys_process_cb(gpointer user_data)
++{
++ while (g_delayed_destroys != NULL) {
++ PluginInstance *plugin = (PluginInstance *)g_delayed_destroys->data;
++ g_delayed_destroys = g_list_delete_link(g_delayed_destroys,
++ g_delayed_destroys);
++ g_NPP_Destroy_Now(plugin, NULL);
++ }
++
++ if (g_delayed_destroys)
++ return TRUE;
++
++ if (g_delayed_destroys_id) {
++ g_source_remove(g_delayed_destroys_id);
++ g_delayed_destroys_id = 0;
++ }
++ return FALSE;
++}
++
+ // NPIdentifier cache
+ static inline bool use_npidentifier_cache(void)
+ {
+@@ -741,7 +778,6 @@
+ }
+ }
+
+-
+ /* ====================================================================== */
+ /* === XPCOM glue === */
+ /* ====================================================================== */
+@@ -3337,6 +3373,13 @@
+ return error;
+ }
+
++	/* Clear any NPP_Destroys we may have delayed. Although it doesn't
++	   really matter, as the plugin is going to die soon.
++
++ XXX: To be really picky, we should probably delay this and make
++ sure it is run on a new event loop iteration. */
++ delayed_destroys_process_cb(NULL);
++
+ NPError ret = g_NP_Shutdown();
+ return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INVALID);
+ }
+@@ -3458,6 +3501,8 @@
+
+ // Process all pending calls as the data could become junk afterwards
+ // XXX: this also processes delayed calls from other instances
++ // XXX: Also, if this was delayed, the NPN_ReleaseObject calls will
++ // be ignored; the browser thinks we've already died.
+ delayed_calls_process(plugin, TRUE);
+
+ D(bugiI("NPP_Destroy instance=%p\n", instance));
+@@ -3472,6 +3517,22 @@
+ return ret;
+ }
+
++static NPError g_NPP_Destroy_Now(PluginInstance *plugin, NPSavedData **save)
++{
++ D(bug("g_NPP_Destroy_Now\n"));
++
++ NPSavedData *save_area = NULL;
++ NPError ret = g_NPP_Destroy(PLUGIN_INSTANCE_NPP(plugin), &save_area);
++ if (save) {
++ *save = save_area;
++ } else if (save_area) {
++ npw_printf("WARNING: NPP_Destroy returned save_area, but it was ignored\n");
++ }
++
++ rpc_connection_unref(g_rpc_connection);
++ return ret;
++}
++
+ static int handle_NPP_Destroy(rpc_connection_t *connection)
+ {
+ D(bug("handle_NPP_Destroy\n"));
+@@ -3487,8 +3548,26 @@
+ return error;
+ }
+
+- NPSavedData *save_area;
+- NPError ret = g_NPP_Destroy(PLUGIN_INSTANCE_NPP(plugin), &save_area);
++ NPSavedData *save_area = NULL;
++ NPError ret = NPERR_NO_ERROR;
++ /* Take a ref for the rpc_method_send_reply; otherwise the
++ * rpc_connection_unref in g_NPP_Destroy_Now may cause a slight
++ * nuisance. */
++ rpc_connection_ref(connection);
++ if (!rpc_method_in_invoke(connection)) {
++ /* The plugin is not on the stack; it's safe to call this. */
++ D(bug("NPP_Destroy is fine.\n"));
++ ret = g_NPP_Destroy_Now(plugin, &save_area);
++ } else {
++ /* It is not safe to call NPP_Destroy right now. Delay it until we
++ * return to the event loop.
++ *
++ * NOTE: This means that the browser never sees the real return
++ * value of NPP_Destroy; the NPSavedData will be discarded, and any
++ * error code will be ignored. */
++ D(bug("NPP_Destroy raced; delaying it to get a clean stack.\n"));
++ delayed_destroys_add(plugin);
++ }
+
+ error = rpc_method_send_reply(connection,
+ RPC_TYPE_INT32, ret,
+
+--- nspluginwrapper-1.3.0/src/npw-viewer.c
++++ nspluginwrapper-1.3.0/src/npw-viewer.c
+@@ -212,10 +212,7 @@ static void delayed_calls_add(int type, gpointer data)
+ // Returns whether there are pending calls left in the queue
+ static gboolean delayed_calls_process(PluginInstance *plugin, gboolean is_in_NPP_Destroy)
+ {
+- GList *l = g_delayed_calls;
+- while (l != NULL) {
+- GList *cl = l;
+- l = l->next;
++ while (g_delayed_calls != NULL) {
+
+ if (!is_in_NPP_Destroy) {
+ /* Continue later if there is incoming RPC */
+@@ -223,7 +220,11 @@ static gboolean delayed_calls_process(PluginInstance *plugin, gboolean is_in_NPP
+ return TRUE;
+ }
+
+- DelayedCall *dcall = (DelayedCall *)cl->data;
++ DelayedCall *dcall = (DelayedCall *)g_delayed_calls->data;
++ /* XXX: Remove the link first; this function /must/ be
++ * re-entrant. We may be called again while processing the
++ * delayed call. */
++ g_delayed_calls = g_list_delete_link(g_delayed_calls, g_delayed_calls);
+ switch (dcall->type) {
+ case RPC_DELAYED_NPN_RELEASE_OBJECT:
+ {
+@@ -233,7 +234,6 @@ static gboolean delayed_calls_process(PluginInstance *plugin, gboolean is_in_NPP
+ }
+ }
+ NPW_MemFree(dcall);
+- g_delayed_calls = g_list_delete_link(g_delayed_calls, cl);
+ }
+
+ if (g_delayed_calls)
diff --git a/nspluginwrapper.spec b/nspluginwrapper.spec
index e3f0432..c06b7ee 100644
--- a/nspluginwrapper.spec
+++ b/nspluginwrapper.spec
@@ -73,7 +73,7 @@
Summary: A compatibility layer for Netscape 4 plugins
Name: nspluginwrapper
Version: 1.3.0
-Release: 17%{?dist}
+Release: 18%{?dist}
Source0: http://gwenole.beauchesne.info/projects/nspluginwrapper/files/%{name}-%{version}%{?svndate:-%{svndate}}.tar.bz2
Source1: %{plugin_config_name}.tar.gz
Source2: plugin-config.sh.in
@@ -88,6 +88,7 @@ Patch7: nspluginwrapper-1.3.0-comp.patch
Patch8: nspluginwrapper-1.3.0-silent.patch
Patch9: nspluginwrapper-1.3.0-timeout.patch
Patch10: npplayer-xid.patch
+Patch11: nspluginwrapper-1.3.0-racecond.patch
Patch100: plugin-config-setuid.patch
Patch101: plugin-config-umask.patch
Patch102: plugin-config-print.patch
@@ -128,6 +129,7 @@ This package consists in:
%patch8 -p1 -b .silent
%patch9 -p1 -b .timeout
%patch10 -p1 -b .xid
+%patch11 -p1 -b .racecond
# Plugin-config patches
pushd %plugin_config_name
@@ -260,6 +262,9 @@ fi;
%changelog
+* Wed Mar 09 2011 Peter Hatina <phatina at redhat.com> 1.3.0-18
+- Race condition patch
+
* Tue Feb 08 2011 Fedora Release Engineering <rel-eng at lists.fedoraproject.org> - 1.3.0-17
- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
More information about the scm-commits
mailing list