VirtualBox

source: vbox/trunk/src/libs/openssl-3.4.1/crypto/threads_win.c

Last change on this file was 109052, checked in by vboxsync, 4 weeks ago

openssl-3.4.1: Applied our changes, regenerated files, added missing files and functions. This time with a three way merge. bugref:10890

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.3 KB
/*
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#if defined(_WIN32)
# include <windows.h>
# if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
#  define USE_RWLOCK
# endif
#endif
#include <assert.h>

/*
 * VC++ 2008 or earlier x86 compilers do not have an inline implementation
 * of InterlockedOr64 for 32bit and will fail to run on Windows XP 32bit.
 * https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions#requirements
 * To work around this problem, we implement a manual locking mechanism for
 * only VC++ 2008 or earlier x86 compilers.
 */

#if ((defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER <= 1600) || (defined(__MINGW32__) && !defined(__MINGW64__)))
# define NO_INTERLOCKEDOR64
#endif

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/common.h"
#include "internal/thread_arch.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && defined(OPENSSL_SYS_WINDOWS)

# ifdef USE_RWLOCK
typedef struct {
    SRWLOCK lock;
    int exclusive;
} CRYPTO_win_rwlock;
# endif

/*
 * users is broken up into 2 parts
 * bits 0-31 current readers
 * bits 32-63 ID
 */
# define READER_SHIFT 0
# define ID_SHIFT 32
/* TODO: READER_SIZE 16 in threads_pthread.c */
# define READER_SIZE 32
# define ID_SIZE 32

# define READER_MASK (((uint64_t)1 << READER_SIZE) - 1)
# define ID_MASK (((uint64_t)1 << ID_SIZE) - 1)
# define READER_COUNT(x) ((uint32_t)(((uint64_t)(x) >> READER_SHIFT) & \
                          READER_MASK))
# define ID_VAL(x) ((uint32_t)(((uint64_t)(x) >> ID_SHIFT) & ID_MASK))
# define VAL_READER ((int64_t)1 << READER_SHIFT)
# define VAL_ID(x) ((uint64_t)x << ID_SHIFT)

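/*
 * Example: users == VAL_ID(3) + 2 * VAL_READER packs generation id 3 with
 * two active readers, so READER_COUNT(users) == 2 and ID_VAL(users) == 3.
 */
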
/*
 * This defines a quiescent point (qp)
 * This is the barrier beyond which a writer
 * must wait before freeing data that was
 * atomically updated
 */
struct rcu_qp {
    volatile uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

#define MAX_QPS 10
/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 *
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* The context we are being created against */
    OSSL_LIB_CTX *ctx;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* TODO: can be moved before id_ctr for better alignment */
    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* Number of elements in qp_group array */
    uint32_t group_count;

    /* Index of the current qp in the qp_group array */
    uint32_t reader_idx;

    /* value of the next id_ctr value to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint32_t current_alloc_idx;

    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    CRYPTO_MUTEX *write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    CRYPTO_MUTEX *alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    CRYPTO_CONDVAR *alloc_signal;

    /* lock to enforce in-order retirement */
    CRYPTO_MUTEX *prior_lock;

    /* signal to wake threads waiting on prior_lock */
    CRYPTO_CONDVAR *prior_signal;

    /* lock used with NO_INTERLOCKEDOR64: VS2010 x86 */
    CRYPTO_RWLOCK *rw_lock;
};

/* TODO: count should be unsigned, e.g. uint32_t */
/* a negative value could result in unexpected behaviour */
static struct rcu_qp *allocate_new_qp_group(struct rcu_lock_st *lock,
                                            int count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}

CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
{
    struct rcu_lock_st *new;

    /*
     * We need a minimum of 3 qps
     */
    if (num_writers < 3)
        num_writers = 3;

    ctx = ossl_lib_ctx_get_concrete(ctx);
    if (ctx == NULL)
        return 0;

    new = OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return NULL;

    new->ctx = ctx;
    new->rw_lock = CRYPTO_THREAD_lock_new();
    new->write_lock = ossl_crypto_mutex_new();
    new->alloc_signal = ossl_crypto_condvar_new();
    new->prior_signal = ossl_crypto_condvar_new();
    new->alloc_lock = ossl_crypto_mutex_new();
    new->prior_lock = ossl_crypto_mutex_new();
    new->qp_group = allocate_new_qp_group(new, num_writers);
    /* By default the first qp is already alloced */
    new->writers_alloced = 1;
    if (new->qp_group == NULL
        || new->alloc_signal == NULL
        || new->prior_signal == NULL
        || new->write_lock == NULL
        || new->alloc_lock == NULL
        || new->prior_lock == NULL
        || new->rw_lock == NULL) {
        CRYPTO_THREAD_lock_free(new->rw_lock);
        OPENSSL_free(new->qp_group);
        ossl_crypto_condvar_free(&new->alloc_signal);
        ossl_crypto_condvar_free(&new->prior_signal);
        ossl_crypto_mutex_free(&new->alloc_lock);
        ossl_crypto_mutex_free(&new->prior_lock);
        ossl_crypto_mutex_free(&new->write_lock);
        OPENSSL_free(new);
        new = NULL;
    }

    return new;

}

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    CRYPTO_THREAD_lock_free(lock->rw_lock);
    OPENSSL_free(lock->qp_group);
    ossl_crypto_condvar_free(&lock->alloc_signal);
    ossl_crypto_condvar_free(&lock->prior_signal);
    ossl_crypto_mutex_free(&lock->alloc_lock);
    ossl_crypto_mutex_free(&lock->prior_lock);
    ossl_crypto_mutex_free(&lock->write_lock);
    OPENSSL_free(lock);
}

/* Read side acquisition of the current qp */
static ossl_inline struct rcu_qp *get_hold_current_qp(CRYPTO_RCU_LOCK *lock)
{
    uint32_t qp_idx;
    uint32_t tmp;
    uint64_t tmp64;

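    /*
     * Acquire the current qp: load the reader index, bump the reader
     * count on that qp, then re-read the index. If a writer swapped in
     * a new qp between the two loads, drop our count and retry so we
     * never hold a reference on a qp that is already being retired.
     */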
    /* get the current qp index */
    for (;;) {
        CRYPTO_atomic_load_int((int *)&lock->reader_idx, (int *)&qp_idx,
                               lock->rw_lock);
        CRYPTO_atomic_add64(&lock->qp_group[qp_idx].users, VAL_READER, &tmp64,
                            lock->rw_lock);
        CRYPTO_atomic_load_int((int *)&lock->reader_idx, (int *)&tmp,
                               lock->rw_lock);
        if (qp_idx == tmp)
            break;
        CRYPTO_atomic_add64(&lock->qp_group[qp_idx].users, -VAL_READER, &tmp64,
                            lock->rw_lock);
    }

    return &lock->qp_group[qp_idx];
}

static void ossl_rcu_free_local_data(void *arg)
{
    OSSL_LIB_CTX *ctx = arg;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    OPENSSL_free(data);
    CRYPTO_THREAD_set_local(lkey, NULL);
}

void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i;
    int available_qp = -1;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);

    /*
     * we're going to access current_qp here so ask the
     * processor to fetch it
     */
    data = CRYPTO_THREAD_get_local(lkey);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(lkey, data);
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
    }

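    /*
     * Scan this thread's qp table: remember the first free slot, and if we
     * already hold this lock we are done (the existing hold keeps the qp
     * pinned, so no extra reference is taken).
     */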
    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock)
            return;
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}

void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_lock(lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_unlock(lock->write_lock);
}

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    int i;
    LONG64 ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                CRYPTO_atomic_add64(&data->thread_qps[i].qp->users,
                                    -VAL_READER, (uint64_t *)&ret,
                                    lock->rw_lock);
                OPENSSL_assert(ret >= 0);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
}

/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
{
    uint64_t new_id;
    uint32_t current_idx;
    uint32_t tmp;
    uint64_t tmp64;

    ossl_crypto_mutex_lock(lock->alloc_lock);
    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        ossl_crypto_condvar_wait(lock->alloc_signal, lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    new_id = VAL_ID(lock->id_ctr);
    lock->id_ctr++;

    /*
     * Even though we are under a write side lock here
     * We need to use atomic instructions to ensure that the results
     * of this update are published to the read side prior to updating the
     * reader idx below
     */
    CRYPTO_atomic_and(&lock->qp_group[current_idx].users, ID_MASK, &tmp64,
                      lock->rw_lock);
    CRYPTO_atomic_add64(&lock->qp_group[current_idx].users, new_id, &tmp64,
                        lock->rw_lock);

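    /*
     * Publishing the new reader index with InterlockedExchange provides a
     * full memory barrier, so the id written into users above is visible
     * to readers before they can observe (and start using) the new index.
     */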
    /* update the reader index to be the prior qp */
    tmp = lock->current_alloc_idx;
    InterlockedExchange((LONG volatile *)&lock->reader_idx, tmp);

    /* wake up any waiters */
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
    return &lock->qp_group[current_idx];
}

static void retire_qp(CRYPTO_RCU_LOCK *lock,
                      struct rcu_qp *qp)
{
    ossl_crypto_mutex_lock(lock->alloc_lock);
    lock->writers_alloced--;
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
}


void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    struct rcu_cb_item *cb_items, *tmpcb;

    /* before we do anything else, let's grab the cb list */
    cb_items = InterlockedExchangePointer((void * volatile *)&lock->cb_items, NULL);

    qp = update_qp(lock);

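    /*
     * Spin until every reader holding the retired qp has dropped its
     * reference.  New readers are already directed at the replacement qp
     * (a racing reader that bumped this one backs out again), so the
     * count drains to zero.
     */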
    /* wait for the reader count to reach zero */
    do {
        CRYPTO_atomic_load(&qp->users, &count, lock->rw_lock);
    } while (READER_COUNT(count) != 0);

    /* retire in order */
    ossl_crypto_mutex_lock(lock->prior_lock);
    while (lock->next_to_retire != ID_VAL(count))
        ossl_crypto_condvar_wait(lock->prior_signal, lock->prior_lock);

    lock->next_to_retire++;
    ossl_crypto_condvar_broadcast(lock->prior_signal);
    ossl_crypto_mutex_unlock(lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }

    /* and we're done */
    return;

}

int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new;

    new = OPENSSL_zalloc(sizeof(struct rcu_cb_item));
    if (new == NULL)
        return 0;
    new->data = data;
    new->fn = cb;

    new->next = InterlockedExchangePointer((void * volatile *)&lock->cb_items, new);
    return 1;
}

void *ossl_rcu_uptr_deref(void **p)
{
    return (void *)*p;
}

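/*
 * Pointer publication: InterlockedExchangePointer is a full barrier, so
 * everything the writer stored into the object before calling
 * ossl_rcu_assign_uptr is visible to readers that subsequently load the
 * pointer via ossl_rcu_uptr_deref.
 */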
void ossl_rcu_assign_uptr(void **p, void **v)
{
    InterlockedExchangePointer((void * volatile *)p, (void *)*v);
}


CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
    CRYPTO_RWLOCK *lock;
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock;

    if ((lock = OPENSSL_zalloc(sizeof(CRYPTO_win_rwlock))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;
    rwlock = lock;
    InitializeSRWLock(&rwlock->lock);
# else

    if ((lock = OPENSSL_zalloc(sizeof(CRITICAL_SECTION))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

#  if !defined(_WIN32_WCE)
    /* 0x400 is the spin count value suggested in the documentation */
    if (!InitializeCriticalSectionAndSpinCount(lock, 0x400)) {
        OPENSSL_free(lock);
        return NULL;
    }
#  else
    InitializeCriticalSection(lock);
#  endif
# endif

    return lock;
}

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockShared(&rwlock->lock);
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockExclusive(&rwlock->lock);
    rwlock->exclusive = 1;
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    if (rwlock->exclusive) {
        rwlock->exclusive = 0;
        ReleaseSRWLockExclusive(&rwlock->lock);
    } else {
        ReleaseSRWLockShared(&rwlock->lock);
    }
# else
    LeaveCriticalSection(lock);
# endif
    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifndef USE_RWLOCK
    DeleteCriticalSection(lock);
# endif
    OPENSSL_free(lock);

    return;
}

# define ONCE_UNINITED 0
# define ONCE_ININIT 1
# define ONCE_DONE 2

/*
 * We don't use InitOnceExecuteOnce because that isn't available in WinXP which
 * we still have to support.
 */
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    LONG volatile *lock = (LONG *)once;
    LONG result;

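    /*
     * Lock-free once: the first caller to CAS the state from UNINITED to
     * ININIT runs init() and then stores DONE; any caller that observes
     * ININIT spins until the winner finishes.
     */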
    if (*lock == ONCE_DONE)
        return 1;

    do {
        result = InterlockedCompareExchange(lock, ONCE_ININIT, ONCE_UNINITED);
        if (result == ONCE_UNINITED) {
            init();
            *lock = ONCE_DONE;
            return 1;
        }
    } while (result == ONCE_ININIT);

    return (*lock == ONCE_DONE);
}

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    *key = TlsAlloc();
    if (*key == TLS_OUT_OF_INDEXES)
        return 0;

    return 1;
}

void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    DWORD last_error;
    void *ret;

    /*
     * TlsGetValue clears the last error even on success, so that callers may
     * distinguish it successfully returning NULL or failing. It is documented
     * to never fail if the argument is a valid index from TlsAlloc, so we do
     * not need to handle this.
     *
     * However, this error-mangling behavior interferes with the caller's use of
     * GetLastError. In particular SSL_get_error queries the error queue to
     * determine whether the caller should look at the OS's errors. To avoid
     * destroying state, save and restore the Windows error.
     *
     * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686812(v=vs.85).aspx
     */
    last_error = GetLastError();
    ret = TlsGetValue(*key);
    SetLastError(last_error);
    return ret;
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (TlsSetValue(*key, val) == 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (TlsFree(*key) == 0)
        return 0;

    return 1;
}

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return GetCurrentThreadId();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return (a == b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
    *ret = (int)InterlockedExchangeAdd((LONG volatile *)val, (LONG)amount)
        + amount;
    return 1;
}

int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
                        CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val += op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedAdd64((LONG64 volatile *)val, (LONG64)op);
    return 1;
#endif
}

int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
                      CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val &= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedAnd64((LONG64 volatile *)val, (LONG64)op) & op;
    return 1;
#endif
}

int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, (LONG64)op) | op;
    return 1;
#endif
}

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
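    /*
     * OR-ing with 0 leaves the value unchanged; the point is that
     * InterlockedOr64 performs the 64-bit read atomically (with a full
     * barrier), which a plain load of a 64-bit value cannot guarantee on
     * 32-bit Windows.
     */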
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, 0);
    return 1;
#endif
}

int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *dst = val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    InterlockedExchange64(dst, val);
    return 1;
#endif
}

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    /* On Windows, LONG (but not long) is always the same size as int. */
    *ret = (int)InterlockedOr((LONG volatile *)val, 0);
    return 1;
#endif
}

int openssl_init_fork_handlers(void)
{
    return 0;
}

int openssl_get_fork_id(void)
{
    return 0;
}
#endif