// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix

#include <pthread.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h> // strerror
#include <time.h>
#include "libcgo.h"
#include "libcgo_unix.h"

static pthread_cond_t runtime_init_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t runtime_init_mu = PTHREAD_MUTEX_INITIALIZER;
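// runtime_init_done is 0 until x_cgo_notify_runtime_init_done sets it to 1
// when the Go runtime has finished initializing; _cgo_wait_runtime_init_done
// then advances it to 2 once a waiter has observed that, so later calls can
// skip the mutex entirely.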
static int runtime_init_done;

// pthread_g is a pthread-specific key, used to store the g bound to a C thread.
// The registered pthread_key_destructor calls dropm when a C thread exits and
// its thread-specific g value is not NULL.
static pthread_key_t pthread_g;
static void pthread_key_destructor(void* g);
uintptr_t x_cgo_pthread_key_created;
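// x_crosscall2_ptr, when non-NULL, points at crosscall2 so that C code here
// (in particular pthread_key_destructor) can call back into Go; a NULL fn
// argument means dropm.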
void (*x_crosscall2_ptr)(void (*fn)(void *), void *, int, size_t);

// The context function, used when tracing back C calls into Go.
static void (*cgo_context_function)(struct context_arg*);

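// x_cgo_sys_thread_create starts a new, detached pthread running func(arg),
// aborting the process if the thread cannot be created.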
void
x_cgo_sys_thread_create(void* (*func)(void*), void* arg) {
	pthread_t p;
	int err = _cgo_try_pthread_create(&p, NULL, func, arg);
	if (err != 0) {
		fprintf(stderr, "pthread_create failed: %s\n", strerror(err));
		abort();
	}
}

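// _cgo_wait_runtime_init_done blocks until the Go runtime has finished
// initializing, then returns the traceback context obtained from the
// registered context function, or 0 if no context function is set.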
uintptr_t
_cgo_wait_runtime_init_done(void) {
	void (*pfn)(struct context_arg*);
	pfn = __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);

	int done = 2;
	if (__atomic_load_n(&runtime_init_done, __ATOMIC_CONSUME) != done) {
		pthread_mutex_lock(&runtime_init_mu);
		while (__atomic_load_n(&runtime_init_done, __ATOMIC_CONSUME) == 0) {
			pthread_cond_wait(&runtime_init_cond, &runtime_init_mu);
		}

		// The key and x_cgo_pthread_key_created are shared by the whole program,
		// whereas the thread-specific value and the destructor call are per-thread.
		if (x_cgo_pthread_key_created == 0 && pthread_key_create(&pthread_g, pthread_key_destructor) == 0) {
			x_cgo_pthread_key_created = 1;
		}

		// TODO(iant): For the case of a new C thread calling into Go, such
		// as when using -buildmode=c-archive, we know that Go runtime
		// initialization is complete but we do not know that all Go init
		// functions have been run. We should not fetch cgo_context_function
		// until they have been, because that is where a call to
		// SetCgoTraceback is likely to occur. We are going to wait for Go
		// initialization to be complete anyhow, later, by waiting for
		// main_init_done to be closed in cgocallbackg1. We should wait here
		// instead. See also issue #15943.
		pfn = __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);

		__atomic_store_n(&runtime_init_done, done, __ATOMIC_RELEASE);
		pthread_mutex_unlock(&runtime_init_mu);
	}

	if (pfn != nil) {
		struct context_arg arg;

		arg.Context = 0;
		(*pfn)(&arg);
		return arg.Context;
	}
	return 0;
}


// _cgo_set_stacklo sets g->stacklo based on the stack size.
// This is common code called from x_cgo_init, which is itself
// called by rt0_go in the runtime package.
void _cgo_set_stacklo(G *g, uintptr *pbounds)
{
	uintptr bounds[2];

	// pbounds can be passed in by the caller; see gcc_linux_amd64.c.
	if (pbounds == NULL) {
		pbounds = &bounds[0];
	}

	x_cgo_getstackbound(pbounds);

	g->stacklo = *pbounds;

	// Sanity check the results now, rather than crashing later
	// with a morestack on g0.
	if (g->stacklo >= g->stackhi) {
		fprintf(stderr, "runtime/cgo: bad stack bounds: lo=%p hi=%p\n", (void*)(g->stacklo), (void*)(g->stackhi));
		abort();
	}
}

// x_cgo_bindm stores g in the thread-specific value associated with the
// pthread key pthread_g, so that pthread_key_destructor can dropm when the
// thread exits.
void x_cgo_bindm(void* g) {
	// We assume this always succeeds; otherwise an extra M may leak
	// when a C thread exits after a cgo call.
	// This function is invoked only once per thread, in runtime.needAndBindM;
	// subsequent calls on the same thread reuse the already-bound m.
	pthread_setspecific(pthread_g, g);
}

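// x_cgo_notify_runtime_init_done is called by the Go runtime once its
// initialization is complete; it wakes any C threads blocked in
// _cgo_wait_runtime_init_done.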
void
x_cgo_notify_runtime_init_done(void* dummy __attribute__ ((unused))) {
	pthread_mutex_lock(&runtime_init_mu);
	__atomic_store_n(&runtime_init_done, 1, __ATOMIC_RELEASE);
	pthread_cond_broadcast(&runtime_init_cond);
	pthread_mutex_unlock(&runtime_init_mu);
}

// Sets the context function to call to record the traceback context
// when calling a Go function from C code. Called from runtime.SetCgoTraceback.
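// The release store below pairs with the consume loads in
// _cgo_wait_runtime_init_done and _cgo_get_context_function.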
void x_cgo_set_context_function(void (*context)(struct context_arg*)) {
	__atomic_store_n(&cgo_context_function, context, __ATOMIC_RELEASE);
}

// Gets the context function.
void (*(_cgo_get_context_function(void)))(struct context_arg*) {
	return __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);
}

// _cgo_try_pthread_create retries pthread_create if it fails with
// EAGAIN.
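// Each retry sleeps (tries+1) milliseconds, so at most 1+2+...+20 = 210 ms
// are spent sleeping before giving up.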
int
_cgo_try_pthread_create(pthread_t* thread, const pthread_attr_t* attr, void* (*pfn)(void*), void* arg) {
	int tries;
	int err;
	struct timespec ts;

	for (tries = 0; tries < 20; tries++) {
		err = pthread_create(thread, attr, pfn, arg);
		if (err == 0) {
			pthread_detach(*thread);
			return 0;
		}
		if (err != EAGAIN) {
			return err;
		}
		ts.tv_sec = 0;
		ts.tv_nsec = (tries + 1) * 1000 * 1000; // Milliseconds.
		nanosleep(&ts, nil);
	}
	return EAGAIN;
}


static void
pthread_key_destructor(void* g) {
	if (x_crosscall2_ptr != NULL) {
		// fn == NULL means dropm.
		// We restore g from the stored value before dropm in runtime.cgocallback,
		// because on some platforms the g stored in TLS by Go may already have
		// been cleared by the time this destructor is invoked.
		x_crosscall2_ptr(NULL, g, 0, 0);
	}
}