Commit | Line | Data |
d1c9dd79 |
1 | #include "beos/beosish.h" |
dbc1d986 |
2 | #include "beos/beos_flock_server.h" |
30410c71 |
3 | |
efca5cc6 |
4 | #undef waitpid |
dbc1d986 |
5 | #undef close |
6 | #undef kill |
efca5cc6 |
7 | |
dbc1d986 |
8 | #include <errno.h> |
9 | #include <signal.h> |
10 | #include <stdio.h> |
11 | #include <stdlib.h> |
12 | #include <unistd.h> |
efca5cc6 |
13 | #include <sys/wait.h> |
14 | |
dbc1d986 |
15 | #include <OS.h> |
16 | |
/* We cache, for which FDs we got a lock. This will especially speed up close(),
   since we won't have to contact the server. */
#define FLOCK_TABLE_SIZE 256
/* flockTable[fd] is non-zero iff this team believes it currently holds a
   flock() lock on fd. Only FDs below FLOCK_TABLE_SIZE are tracked; larger
   FDs always go through the server (see beos_flock()). */
static int flockTable[FLOCK_TABLE_SIZE];
21 | |
/* In BeOS 5.0 the waitpid() seems to misbehave in that the status
 * has the upper and lower bytes swapped compared with the usual
 * POSIX/UNIX implementations. To undo the surprise effect to the
 * rest of Perl we need this wrapper. (The rest of BeOS might be
 * surprised because of this, though.) */

/* Wrapper around waitpid() that byte-swaps the low 16 bits of the exit
 * status so it matches the usual POSIX layout.
 *
 * process_id, status_location, options: forwarded verbatim to waitpid().
 * Returns waitpid()'s result (reaped pid, 0 for WNOHANG/no child ready,
 * or -1 with errno set).
 *
 * Fix: swap only when a child was actually reaped (got > 0). On error,
 * or when WNOHANG finds no ready child, waitpid() leaves
 * *status_location untouched, so the old unconditional swap read and
 * rewrote possibly uninitialized caller memory. */
pid_t beos_waitpid(pid_t process_id, int *status_location, int options) {
    pid_t got = waitpid(process_id, status_location, options);
    if (got > 0 && status_location)
        *status_location =
            (*status_location & 0x00FF) << 8 |
            (*status_location & 0xFF00) >> 8;
    return got;
}
/* The flock() emulation worker function. */

/* Ask the flock server for 'operation' (LOCK_SH/LOCK_EX/LOCK_UN, optionally
 * ORed with LOCK_NB) on the file behind 'fd'.
 *
 * Protocol (see the comment before write_port() below): we send a request
 * carrying the file's (device, inode) pair plus a semaphore that others will
 * block on while we own the lock; the server replies with the semaphores of
 * all lock holders ahead of us, which we then acquire in turn.
 *
 * Returns B_OK on success, a B_* / errno error code otherwise. */
static status_t beos_flock(int fd, int operation) {
    /* Lazily-resolved port of the (externally started) flock server;
       looked up once per team. */
    static int serverPortInitialized = 0;
    static port_id serverPort = -1;

    struct stat st;
    int blocking;
    port_id replyPort;
    sem_id lockSem = -1;
    status_t error;
    flock_server_request request;
    flock_server_reply *reply = NULL;

    if (fd < 0)
        return B_BAD_VALUE;

    /* split LOCK_NB off; only the plain operation is sent to the server */
    blocking = !(operation & LOCK_NB);
    operation &= LOCK_SH | LOCK_EX | LOCK_UN;

    /* don't try to unlock something that isn't locked */
    if (operation == LOCK_UN && fd < FLOCK_TABLE_SIZE && !flockTable[fd])
        return B_OK;

    /* if not yet initialized, get the server port */
    if (!serverPortInitialized) {
        serverPort = find_port(FLOCK_SERVER_PORT_NAME);
        /* bonefish: If the port wasn't present at this point, we could start
         * the server. In fact, I tried this and in works, but unfortunately
         * it also seems to confuse our pipes (with both load_image() and
         * system()). So, we can't help it, the server has to be started
         * manually. */
        serverPortInitialized = ~0;
    }
    if (serverPort < 0)
        return B_ERROR;

    /* stat() the file to get the node_ref */
    if (fstat(fd, &st) < 0)
        return errno;

    /* create a reply port */
    replyPort = create_port(1, "flock reply port");
    if (replyPort < 0)
        return replyPort;

    /* create a semaphore others will wait on while we own the lock */
    if (operation != LOCK_UN) {
        char semName[64];
        /* NOTE(review): the trailing "\n" in the semaphore name looks
         * unintentional — confirm whether the server/debug tools care. */
        sprintf(semName, "flock %ld:%lld\n", st.st_dev, st.st_ino);
        lockSem = create_sem(0, semName);
        if (lockSem < 0) {
            delete_port(replyPort);
            return lockSem;
        }
    }

    /* prepare the request */
    request.replyPort = replyPort;
    request.lockSem = lockSem;
    request.device = st.st_dev;
    request.node = st.st_ino;
    request.fd = fd;
    request.operation = operation;
    request.blocking = blocking;

    /* We ask the server to get us the requested lock for the file.
     * The server returns semaphores for all existing locks (or will exist
     * before it's our turn) that prevent us from getting the lock just now.
     * We block on them one after the other and after that officially own the
     * lock. If we told the server that we don't want to block, it will send
     * an error code, if that is not possible. */

    /* send the request */
    error = write_port(serverPort, 0, &request, sizeof(request));

    if (error == B_OK) {
        /* get the reply size (blocks until the server has answered) */
        int replySize = port_buffer_size(replyPort);
        if (replySize < 0)
            error = replySize;

        /* allocate reply buffer */
        if (error == B_OK) {
            reply = (flock_server_reply*)malloc(replySize);
            if (!reply)
                error = B_NO_MEMORY;
        }

        /* read the reply */
        if (error == B_OK) {
            int32 code;
            ssize_t bytesRead = read_port(replyPort, &code, reply, replySize);
            if (bytesRead < 0) {
                error = bytesRead;
            } else if (bytesRead != replySize) {
                /* short read: treat a truncated reply as a hard failure */
                error = B_ERROR;
            }
        }
    }

    /* get the error returned by the server */
    if (error == B_OK)
        error = reply->error;

    /* wait for all lockers before us; retry acquire_sem() when a signal
     * interrupts it so a stray signal can't make us skip a holder */
    if (error == B_OK) {
        int i;
        for (i = 0; i < reply->semaphoreCount; i++)
            while (acquire_sem(reply->semaphores[i]) == B_INTERRUPTED);
    }

    /* free the reply buffer (free(NULL) is fine on the early-error paths) */
    free(reply);

    /* delete the reply port */
    delete_port(replyPort);

    /* on failure delete the semaphore (for LOCK_UN, lockSem is -1 and
     * delete_sem() just fails harmlessly) */
    if (error != B_OK)
        delete_sem(lockSem);

    /* update the entry in the flock table */
    if (error == B_OK && fd < FLOCK_TABLE_SIZE) {
        if (operation == LOCK_UN)
            flockTable[fd] = 0;
        else
            flockTable[fd] = 1;
    }

    return error;
}
169 | |
170 | /* We implement flock() using a server. It is not really compliant with, since |
171 | * it would be very hard to track dup()ed FDs and those cloned as side-effect |
172 | * of fork(). Our locks are bound to the process (team) and a particular FD. |
173 | * I.e. a lock acquired by a team using a FD can only be unlocked by the same |
174 | * team using exactly the same FD (no other one pointing to the same file, not |
175 | * even when dup()ed from the original one). close()ing the FD releases the |
176 | * lock (that's why we need to override close()). On termination of the team |
177 | * all locks owned by the team will automatically be released. */ |
178 | |
179 | int flock(int fd, int operation) { |
180 | status_t error = beos_flock(fd, operation); |
181 | return (error == B_OK ? 0 : (errno = error, -1)); |
182 | } |
/* We need to override close() to release a potential lock on the FD. See
   flock() for details */

/* close() override: first drop any flock() lock this team holds on fd
 * (best effort — the unlock result is deliberately ignored), then do the
 * real close(). */
int beos_close(int fd) {
    (void)flock(fd, LOCK_UN);
    return close(fd);
}
/* BeOS kill() doesn't like the combination of the pseudo-signal 0 and
 * specifying a process group (i.e. pid < -1 || pid == 0). We work around
 * by changing pid to the respective process group leader. That should work
 * well enough in most cases. */

int beos_kill(pid_t pid, int sig)
{
    pid_t target = pid;

    /* For the existence probe (sig == 0) aimed at a process group,
     * redirect the probe at the group leader instead. pid == -1 and
     * pid > 0 are passed through untouched. */
    if (sig == 0 && pid <= 0 && pid != -1)
        target = (pid == 0) ? getpgrp() : -pid;

    return kill(target, sig);
}