-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlinked-list.c
More file actions
152 lines (140 loc) · 3.68 KB
/
linked-list.c
File metadata and controls
152 lines (140 loc) · 3.68 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
/* This is free and unencumbered software released into the public domain.
* Refer to LICENSE.txt in this directory. */
/**
* \brief The worst linked list in the world
*
* An example of a thread-unsafe linked list being used to pass
* blocks of data between threads. Because the threads spend most
* of their time doing computation (vs manipulating the linked list)
* this example only fails rarely when run natively.
*/
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/**
 * \brief Trivial singly linked list structure
 *
 * Intrusive node: each element owns a `next` pointer and an opaque
 * payload pointer. The list is anchored by a static dummy head node
 * (`list_head`), whose `next` field is the true head of the list.
 */
struct list
{
    struct list *next;  /* following node, or NULL at the tail */
    void *data;         /* opaque payload; ownership is the consumer's */
};
typedef struct list list_t;
/* Dummy anchor node shared (unsafely, on purpose) by both threads;
 * only its `next` field is ever used. */
static list_t list_head;
/* Loop bound for s_compute_junk(); set once in main() before the
 * threads start, read-only afterwards. */
static int iterations = 0;
/**
 * \brief Buggy function to push to the head of the list
 *
 * Deliberately racy: the load of `list_head.next` and the store back
 * to it are two separate atomic operations, so a concurrent push or
 * pop landing between them is silently lost. The per-access atomics
 * only stop the compiler/CPU from tearing; they do not make the
 * read-modify-write sequence atomic as a whole.
 */
static void
s_push(list_t *item)
{
    /* Pairs with the release stores to list_head.next */
    list_t *old_head = __atomic_load_n(&list_head.next, __ATOMIC_ACQUIRE);

    /* A thread switch anywhere in this window loses updates */
    item->next = old_head;

    __atomic_store_n(&list_head.next, item, __ATOMIC_RELEASE);
}
/**
 * \brief Buggy function to pop from the head of the list
 *
 * Mirror image of s_push(): `list_head.next` may be rewritten by
 * another thread between the load of the current head and the store
 * that unlinks it, dropping or double-delivering nodes.
 *
 * \return the detached former head node, or NULL if the list is empty;
 *         the caller takes ownership and must free it.
 */
static void *
s_pop(void)
{
    /* Pairs with the release stores to list_head.next */
    list_t *head = __atomic_load_n(&list_head.next, __ATOMIC_ACQUIRE);

    if (head == NULL)
    {
        return NULL;
    }

    /* A thread switch anywhere in this window loses updates */
    __atomic_store_n(&list_head.next, head->next, __ATOMIC_RELEASE);
    return head;
}
/**
 * \brief Consume CPU cycles / run up the bbcount
 *
 * Simulates an application in which threads mostly crunch numbers on
 * their own and only occasionally touch the shared list. When the
 * compute phases vary enough in length, the two threads rarely overlap
 * inside the list routines, so the races there almost never fire —
 * which is exactly what makes such bugs hard to reproduce natively.
 * The accumulated value is discarded; only the work matters.
 */
static void
s_compute_junk(void)
{
    const int limit = iterations;
    unsigned long scratch = 0;

    for (long step = 0; step < limit; ++step)
    {
        __atomic_fetch_add(&scratch, step, __ATOMIC_RELAXED);
    }
}
/**
* \brief Push items onto list between lots of computation
*/
static void *
s_producer(void *arg __attribute__((unused)))
{
while (true)
{
/* Allocate a block of data and submit it to the list */
list_t *item = malloc(sizeof *item);
s_push(item);
for (int i = 0; i < 10; ++i)
{
s_compute_junk();
}
}
return NULL;
}
/**
 * \brief Pop items from the list between doing lots of computation
 *
 * Thread entry point: repeatedly drains one node via the (racy)
 * s_pop(). An empty list is not an error — the producer may simply be
 * mid-computation — so the consumer burns CPU and retries.
 *
 * \param arg unused pthread argument
 * \return NULL (never reached; the loop does not terminate)
 */
static void *
s_consumer(void *arg __attribute__((unused)))
{
    while (true)
    {
        list_t *item = s_pop();
        if (item != NULL)
        {
            /* A real application would do some work on the data it
             * got from the list; we just free it. */
            free(item);
        }
        else
        {
            /* We've consumed all we can, spin */
            s_compute_junk();
        }
    }
    return NULL;
}
/**
 * \brief Parse the iteration count and run the producer/consumer pair
 *
 * argv[1] sets the busy-loop length used by s_compute_junk(). Joining
 * the producer blocks forever in normal operation, which is intended:
 * the program runs until interrupted (or until the race corrupts it).
 *
 * \return 0 never in practice; 1 on bad usage, bad input, or thread
 *         creation failure
 */
int
main(int argc, char **argv)
{
    if (argc != 2)
    {
        /* On a typical thinkpad 10000 is a good default value */
        fprintf(stderr, "use: %s iterations (10000 is a good value to try)\n", argv[0]);
        return 1;
    }

    /* strtol instead of atoi: atoi turns garbage into a silent 0,
     * which would make s_compute_junk() a no-op and hide the point
     * of the demo. Reject non-numeric, non-positive, or out-of-range
     * values explicitly. */
    char *end = NULL;
    long value = strtol(argv[1], &end, 10);
    if (end == argv[1] || *end != '\0' || value <= 0 || value > INT_MAX)
    {
        fprintf(stderr, "%s: invalid iteration count '%s'\n", argv[0], argv[1]);
        return 1;
    }
    iterations = (int)value;

    pthread_t t_producer, t_consumer;
    if (pthread_create(&t_producer, NULL, s_producer, NULL) != 0)
    {
        fprintf(stderr, "%s: failed to create producer thread\n", argv[0]);
        return 1;
    }
    if (pthread_create(&t_consumer, NULL, s_consumer, NULL) != 0)
    {
        fprintf(stderr, "%s: failed to create consumer thread\n", argv[0]);
        return 1;
    }

    pthread_join(t_producer, NULL);
    return 0;
}