Académique Documents
Professionnel Documents
Culture Documents
SYNOPSIS
#include <pthread.h>
The pthread_mutex_init() function initialises the mutex referenced by mutex with attributes specified by attr. If
attr is NULL, the default mutex attributes are used; the effect is the same as passing the address of a default
mutex attributes object. Upon successful initialisation, the state of the mutex becomes initialised and unlocked.
#include <pthread.h>
pthread_mutex_t mp = PTHREAD_MUTEX_INITIALIZER;
pthread_mutexattr_t mattr;
int ret;
NAME
pthread_mutex_lock, pthread_mutex_trylock, pthread_mutex_unlock - lock
and unlock a mutex
SYNOPSIS
#include <pthread.h>
pthread_mutex_init()
Initialize mutex
Synopsis:
#include <pthread.h>
int pthread_mutex_init(
pthread_mutex_t* mutex,
const pthread_mutexattr_t* attr );
Arguments:
mutex
A pointer to the pthread_mutex_t object that you want to initialize.
attr
NULL, or a pointer to a pthread_mutexattr_t object that specifies the attributes that you want to use for the mutex. For more
information, see pthread_mutexattr_init().
Library:
libc
Use the -l c option to qcc to link against this library. This library is usually included automatically.
Description:
The pthread_mutex_init() function initializes the given mutex object, using the attributes specified by the mutex attributes object attr. If attr is NULL,
then the mutex is initialized with the default attributes (see pthread_mutexattr_init()). After initialization, the mutex is in an unlocked state.
You can initialize a statically allocated mutex with the default attributes by assigning to it the macro PTHREAD_MUTEX_INITIALIZER or
PTHREAD_RMUTEX_INITIALIZER (for recursive mutexes).
pthread_mutexattr_init()
Synopsis:
#include <pthread.h>
Arguments:
attr
A pointer to the pthread_mutexattr_t object that you want to initialize.
Library:
libc
Use the -l c option to qcc to link against this library. This library is usually included automatically.
Description:
The pthread_mutexattr_init() function initializes the attributes in the mutex attribute object attr to their default values. After initializing a mutex
attribute object, you can use it to initialize one or more mutexes by calling pthread_mutex_init().
__protocol
PTHREAD_PRIO_INHERIT--when a thread is blocking higher-priority threads by locking one or more mutexes with this attribute, the thread's
priority is raised to that of the highest priority thread waiting on the PTHREAD_PRIO_INHERIT mutex.
__recursive
PTHREAD_RECURSIVE_DISABLE--threads can't recursively lock a mutex; any thread that tries to lock an already locked mutex becomes
blocked.
Mutex Example
/* in /cs/cs3013/public/example/mutexthr.c */
#include <stdio.h>
#include <pthread.h>
IncrementX()
{
int Temp; /* local variable */
BeginRegion()
{
pthread_mutex_lock(&mutex);
}
EndRegion()
{
pthread_mutex_unlock(&mutex);
}
#include <stdio.h>
#include <pthread.h>
while( 1 ) {
pthread_mutex_lock( &mutex );
tmp = count++;
pthread_mutex_unlock( &mutex );
printf( "Count is %d\n", tmp );
return 0;
}
while( 1 ) {
pthread_mutex_lock( &mutex );
tmp = count--;
pthread_mutex_unlock( &mutex );
printf( "** Count is %d\n", tmp );
return 0;
}
return 0;
}
Synchronization Example
/* in /cs/cs3013/public/example/pcthreads.c */
#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>
n = 0;
if (sem_init(&consumed, 0, 0) < 0) {
perror("sem_init");
exit(1);
}
if (sem_init(&produced, 0, 1) < 0) {
perror("sem_init");
exit(1);
}
if (pthread_create(&idprod, NULL, produce, (void *)loopcnt) != 0) {
perror("pthread_create");
exit(1);
}
if (pthread_create(&idcons, NULL, consume, (void *)loopcnt) != 0) {
perror("pthread_create");
exit(1);
}
(void)pthread_join(idprod, NULL);
(void)pthread_join(idcons, NULL);
(void)sem_destroy(&produced);
(void)sem_destroy(&consumed);
}
int count = 0;   /* shared counter -- deliberately unprotected */

/* Thread body: increments the global `count` NITER times using an
 * unlocked read-modify-write.  The race is intentional: it demonstrates
 * lost updates when two threads run this concurrently.
 * Fix: a void* start routine must return a value (UB otherwise if the
 * joiner inspects it); return NULL explicitly. */
void * ThreadAdd(void * a)
{
    int i, tmp;
    for(i = 0; i < NITER; i++)
    {
        tmp = count;     /* copy the global count locally */
        tmp = tmp+1;     /* increment the local copy */
        count = tmp;     /* store the local value into the global count */
    }
    return NULL;
}
pthread_exit(NULL);
}
#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>
!!!!!
n = 0;
sem_init(&consumed, 0, 0) < 0)
sem_init(&produced, 0, 1) < 0)
(void)pthread_join(idprod, NULL);
(void)pthread_join(idcons, NULL);
(void)sem_destroy(&produced);
(void)sem_destroy(&consumed);
}
/* Bounded-buffer record shared by producer and consumer threads.
 * Counting semaphores full/empty track occupancy; mutex (used as a
 * binary semaphore) guards buf/in/out.
 * NOTE(review): BUFF_SIZE is assumed to be #define'd elsewhere in the
 * file -- confirm. */
typedef struct {
int buf[BUFF_SIZE]; /* shared var */
int in; /* buf[in%BUFF_SIZE] is the first empty slot */
int out; /* buf[out%BUFF_SIZE] is the first full slot */
sem_t full; /* keep track of the number of full spots */
sem_t empty; /* keep track of the number of empty spots */
sem_t mutex; /* enforce mutual exclusion to shared data */
} sbuf_t;
sbuf_t shared;
void *Producer(void *arg)
{
int i, item, index;
index = (int)arg;
for (i=0; i < NITERS; i++) {
/* Produce item */
item = i;
printf("[P%d] Producing %d ...\n", index, item); fflush(stdout);
int main()
{
pthread_t idP, idC;
int index;
sem_init(&shared.full, 0, 0);
sem_init(&shared.empty, 0, BUFF_SIZE);
/* global vars */
/* semaphores are declared global so they can be accessed
in main() and in thread routine,
here, the semaphore is used as a mutex */
sem_t mutex;
int counter; /* shared variable */
/* Spawns two threads running handler() (defined elsewhere -- its
 * signature is not visible here), each receiving the address of one
 * slot of i[], then joins both before exiting.
 * NOTE(review): casting handler through (void *) assumes its type is
 * compatible with void *(*)(void *); if so it should be passed uncast
 * -- confirm against handler's definition.
 * NOTE(review): i[] is never initialized here; presumably handler only
 * writes through the pointer -- verify. */
int main()
{
int i[2];
pthread_t thread_a;
pthread_t thread_b;
/* Note: you can check if thread has been successfully created by checking return
value of pthread_create */
pthread_create (&thread_a, NULL, (void *) &handler, (void *) &i[0]);
pthread_create (&thread_b, NULL, (void *) &handler, (void *) &i[1]);
pthread_join(thread_a, NULL);
pthread_join(thread_b, NULL);
/* exit */
exit(0);
} /* main() */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>    /* strncmp, strlen used by thread_function below */
#include <unistd.h>
#include <pthread.h>
void *thread_function(void *arg);
pthread_mutex_t work_mutex; /* protects both work_area and time_to_exit */
#define WORK_SIZE 1024
char work_area[WORK_SIZE];
int time_to_exit = 0;
/* Driver for the reader/worker demo: main() reads lines of input into
 * work_area while thread_function (below) consumes them.  work_mutex
 * protects both work_area and time_to_exit.
 * NOTE(review): pthread_* functions return an error code and do not set
 * errno, so the perror() calls below may print a misleading reason;
 * strerror(res) would be accurate.  Left as-is. */
int main() {
int res;
pthread_t a_thread;
void *thread_result;
res = pthread_mutex_init(&work_mutex, NULL);
if (res != 0) {
perror("Mutex initialization failed");
exit(EXIT_FAILURE);
}
res = pthread_create(&a_thread, NULL, thread_function, NULL);
if (res != 0) {
perror("Thread creation failed");
exit(EXIT_FAILURE);
}
/* Hold the mutex while filling work_area so the worker cannot read a
 * half-written line. */
pthread_mutex_lock(&work_mutex);
printf("Input some text. Enter 'end' to finish\n");
while(!time_to_exit) {
fgets(work_area, WORK_SIZE, stdin);
pthread_mutex_unlock(&work_mutex);
/* Poll once per second until the worker has consumed the line, which
 * it signals by clearing work_area[0].  The inner loop exits still
 * holding the mutex, matching the fgets() above on the next pass. */
while(1) {
pthread_mutex_lock(&work_mutex);
if (work_area[0] != '\0') {
pthread_mutex_unlock(&work_mutex);
sleep(1);
}
else {
break;
}
}
}
pthread_mutex_unlock(&work_mutex);
printf("\nWaiting for thread to finish...\n");
res = pthread_join(a_thread, &thread_result);
if (res != 0) {
perror("Thread join failed");
exit(EXIT_FAILURE);
}
printf("Thread joined\n");
pthread_mutex_destroy(&work_mutex);
exit(EXIT_SUCCESS);
}
/* Worker thread: repeatedly consumes the line that main() placed in
 * work_area.  It acknowledges consumption by clearing work_area[0],
 * then polls (dropping the mutex and sleeping) until a new line
 * appears.  Typing "end" terminates the loop; the worker then sets
 * time_to_exit (under the mutex) so main() stops reading.
 * Fix: the original passed a size_t (strlen result) to the %d
 * conversion, which is undefined behavior; cast the count to int. */
void *thread_function(void *arg) {
    sleep(1);
    pthread_mutex_lock(&work_mutex);
    while(strncmp("end", work_area, 3) != 0) {
        /* -1 excludes the newline fgets() retains */
        printf("You input %d characters\n", (int)(strlen(work_area) - 1));
        work_area[0] = '\0';           /* tell main() the line was consumed */
        pthread_mutex_unlock(&work_mutex);
        sleep(1);
        pthread_mutex_lock(&work_mutex);
        while (work_area[0] == '\0' ) {  /* wait for the next line */
            pthread_mutex_unlock(&work_mutex);
            sleep(1);
            pthread_mutex_lock(&work_mutex);
        }
    }
    time_to_exit = 1;
    work_area[0] = '\0';
    pthread_mutex_unlock(&work_mutex);
    pthread_exit(0);
}
Example 1:
Two threads displaying two strings Hello and How are you? independent of each other.
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
/* Thread body: prints "Hello!!" forever; runs until the process dies. */
void * thread1()
{
    for (;;) {
        printf("Hello!!\n");
    }
}
/* Thread body: prints "How are you?" forever; runs until the process dies. */
void * thread2()
{
    for (;;) {
        printf("How are you?\n");
    }
}
/* Example 1 driver: starts the two printing threads and joins them
 * (the joins never return because both threads loop forever, so the
 * interleaved output continues until the process is killed).
 * Fix: removed the unused local `status`. */
int main()
{
    pthread_t tid1, tid2;

    pthread_create(&tid1, NULL, thread1, NULL);
    pthread_create(&tid2, NULL, thread2, NULL);
    pthread_join(tid1, NULL);
    pthread_join(tid2, NULL);
    return 0;
}
Now compile this program (Note the -l option is to load the pthread library)
On running, you can see many interleaved Hello!! and How are you? messages
Example 2
This example involves a reader and a writer thread. The reader thread reads a string from the user and writer
thread displays it. This program uses semaphore so as to achieve synchronization
#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>
char n[1024];
sem_t len;
/* Reader thread: repeatedly reads a whitespace-delimited token into the
 * global buffer n and posts the `len` semaphore so the writer may print it.
 * Fix: the original scanf("%s", n) had no field width, allowing input
 * longer than 1023 characters to overflow n (declared char n[1024]);
 * bound the conversion with %1023s. */
void * read1()
{
    while(1){
        printf("Enter a string");
        scanf("%1023s",n);
        sem_post(&len);
    }
}
/* Writer thread: blocks on the `len` semaphore until the reader has
 * posted a new string, then prints the shared buffer n.
 * Fix: the original text was missing the function's closing brace,
 * so the file did not compile. */
void * write1()
{
    while(1){
        sem_wait(&len);
        printf("The string entered is :");
        printf("==== %s\n",n);
    }
}
int main()
{
int status;
pthread_t tr, tw;
pthread_create(&tr,NULL,read1,NULL);
pthread_create(&tw,NULL,write1,NULL);
pthread_join(tr,NULL);
pthread_join(tw,NULL);
return 0;
}
On running, in most cases we may be able to achieve a serial read and write (Thread1 reads a string and Thread2
displays the same string). But suppose we insert a sleep() call in write1 like
void * write1()
{
while(1){
sleep(5);
sem_wait(&len);
printf("The string entered is :");
printf("==== %s\n",n);
}
}
The thread 1 may read one more string and thread2 displays the last read string. That is no serial read and write
is achieved.
So we may need to use the condition variables to achieve serial read and write.
Example 3
This example involves a reader and a writer thread. The reader thread reads a string from the user and writer
thread displays it. This program uses condition variables to achieve synchronization and achieve serial
programming.
#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>
#define TRUE 1
#define FALSE 0
char n[1024];
pthread_mutex_t lock= PTHREAD_MUTEX_INITIALIZER;
int string_read=FALSE;
pthread_cond_t cond;
/* Reader thread (condition-variable example): spins until the writer
 * has consumed the previous string, then reads a new one under the
 * lock, sets the flag, and signals the writer.
 * NOTE(review): `while(string_read);` reads the shared flag without
 * holding `lock` (a data race) and burns CPU; a second condition
 * variable signalled by the writer after clearing the flag would fix
 * both.  Left as-is because the fix requires a coordinated change in
 * write1() as well.
 * NOTE(review): `cond` is declared without PTHREAD_COND_INITIALIZER and
 * is never pthread_cond_init'ed -- confirm/initialize before use. */
void * read1()
{
while(1){
while(string_read);
pthread_mutex_lock(&lock);
printf("Enter a string: ");
scanf("%s",n);
string_read=TRUE;
pthread_mutex_unlock(&lock);
pthread_cond_signal(&cond);
}
}
/* Writer thread: sleeps on `cond` until the reader has published a
 * string, prints it, then clears string_read so the reader's spin loop
 * may proceed.  The predicate is rechecked in a while loop, as POSIX
 * requires, to tolerate spurious wakeups. */
void * write1()
{
while(1){
pthread_mutex_lock(&lock);
while(!string_read)
pthread_cond_wait(&cond,&lock);
printf("The string entered is %s\n",n);
string_read=FALSE;
pthread_mutex_unlock(&lock);
}
}
int main()
{
int status;
pthread_t tr, tw;
pthread_create(&tr,NULL,read1,NULL);
pthread_create(&tw,NULL,write1,NULL);
pthread_join(tr,NULL);
pthread_join(tw,NULL);
return 0;
}
sem_t semname;
int qval=1;
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#define NITER 1000000000
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
long count = 0, count1 = 0;
/* Racy demo thread: increments the global `count` NITER times with no
 * locking to show lost updates.
 * Fixes: the original was missing the semicolon after `count++` (a
 * syntax error), declared an unused `tmp`, and fell off the end of a
 * void* function without returning a value. */
void * ThreadAdd(void * a) {
    long i;
    for(i = 0; i < NITER; i++)
    {
        count++;
    }
    return NULL;
}
usage on Solaris:
gcc thisfile.c -lpthread -lposix4
a.out numIters
*/
#include <pthread.h>
#include <stdio.h>
#include <semaphore.h>
#define SHARED 1
numIters = atoi(argv[1]);
sem_init(&empty, SHARED, 1); /* sem empty = 1 */
sem_init(&full, SHARED, 0); /* sem full = 0 */
printf("main started\n");
pthread_create(&pid, &attr, Producer, NULL);
pthread_create(&cid, &attr, Consumer, NULL);
pthread_join(pid, NULL);
pthread_join(cid, NULL);
printf("main done\n");
}
sem_t mutex;
long count = 0, count1 = 0;
/* Thread body contrasting protected and unprotected updates: first
 * NITER increments of `count` guarded by the `mutex` semaphore (used as
 * a binary lock), then NITER unguarded increments.
 * Fixes: the second loop used `i` which was scoped to the first C99
 * for-statement (compile error) -- declare it at function scope; also
 * return NULL from the void* start routine. */
void * ThreadAdd(void * a) {
    long i;
    for(i = 0; i < NITER; i++)
    {
        sem_wait(&mutex);
        count++;
        sem_post(&mutex);
    }
    for(i = 0; i < NITER; i++) count++;
    return NULL;
}
sem_destroy(&mutex);
pthread_exit(NULL);
}
OK! count is [2000000000]
BOOM! count is [16372624], should be 2000000000
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <semaphore.h>
/* Bounded buffer shared between the producer and consumer threads.
 * Counting semaphores full/empty track occupancy; mutex (a binary
 * semaphore) guards buf/in/out.
 * NOTE(review): BUFF_SIZE must be #define'd before this point --
 * confirm. */
typedef struct {
int buf[BUFF_SIZE]; /* shared var */
int in; /* buf[in%BUFF_SIZE] is the first empty slot */
int out; /* buf[out%BUFF_SIZE] is the first full slot */
sem_t full; /* keep track of the number of full spots */
sem_t empty; /* keep track of the number of empty spots */
sem_t mutex; /* enforce mutual exclusion to shared data */
} sbuf_t;
sbuf_t shared;
/* function prototypes */
void *Producer(void *arg);
void *Consumer(void *arg);
int main()
{
/* producer/consumer threads */
pthread_t idP, idC;
/* loop control */
int index;
/* creates semaphores */
sem_init(&shared.full, 0, 0);
sem_init(&shared.empty, 0, BUFF_SIZE);
sem_init(&shared.mutex, 0, 1);
while(1)
{
sleep(1);
sem_wait(&shared.mutex);
temp1 = (consumed * 3600) / j;
temp2 = (produced * 3600) / j;
printf("\n\nStatistics after %d seconds: \n\n", j);
sem_post(&shared.mutex);
printf("So many consumed per hour: %d\n", temp1);
printf("So many produced per hour: %d\n", temp2);
printf("Ratio of consumed/produced per hour: %d\n", ratio);
j++;
}
index = (int)arg;
/* Produce item */
data = produced;
printf("[P%d] Producing %d ...\n\n", index, data); fflush(stdout);
// data=produce_item(shared.buf);
index = (int)arg;
data = consumed;
}
** Semaphores **
One person who realized this years ago was Edsger Dijkstra, known among other
things for his famous "shortest paths" algorithm in graph theory [1], an early
polemic on structured programming entitled "Go To Statement Considered
Harmful" [2] (what a great title!), and, in the case we will study here, the
introduction of a powerful and flexible synchronization primitive known as the
*semaphore* [3].
In this note, we will first describe the semaphore, and then show how it can
be used to solve a number of important synchronization problems.
[SEMAPHORE: DEFINITION]
--------------------------------------------------------------------------------
#include <semaphore.h>
sem_t s;
sem_init(&s, 0, 1);
--------------------------------------------------------------------------------
[FIGURE: INITIALIZING A SEMAPHORE]
--------------------------------------------------------------------------------
int sem_wait(sem_t *s) {
wait until value of semaphore s is greater than 0
decrement the value of semaphore s by 1
}
For now, we are not concerned with the implementation of these routines, which
clearly requires some care; with multiple threads calling into sem_wait() and
sem_post(), there is the obvious need for managing these critical sections
with locks and queues similar to how we previously built locks. We will now
focus on how to *use* these primitives; later we may discuss how they are
built.
A couple of notes. First, we can see that sem_wait() will either return right
away (because the value of the semaphore was 1 or higher when we called
sem_wait()), or it will cause the caller to suspend execution waiting for a
subsequent post. Of course, multiple calling threads may call into sem_wait(),
and thus all be queued waiting to be woken. Once woken, the waiting thread
will then decrement the value of the semaphore and return to the user.
Second, we can see that sem_post() does not ever suspend the caller. Rather,
it simply increments the value of the semaphore and then, if there is a thread
waiting to be woken, wakes 1 of them up.
You should not worry here about the seeming race conditions possible within
the semaphore; assume that the modifications they make to the state of the
semaphore are all performed atomically.
We are now ready to use a semaphore. Our first use will be one with which we
are already familiar: using a semaphore as a lock. Here is a code snippet:
--------------------------------------------------------------------------------
sem_t m;
sem_init(&m, 0, X); // initialize semaphore to X; what should X be?
sem_wait(&m);
// critical section here
sem_post(&m);
--------------------------------------------------------------------------------
[FIGURE: A SEMAPHORE AS A LOCK]
The more interesting case arises when thread 0 holds the lock (i.e., it has
called sem_wait() but not yet called sem_post()), and another thread (thread
1, say) tries to enter the critical section by calling sem_wait(). In this
case, thread 1 will find that the value of the semaphore is 0, and thus wait
(putting itself to sleep and relinquishing the processor). When thread 0 runs
again, it will eventually call sem_post(), incrementing the value of the
semaphore back to 1, and then wake the waiting thread 1, which will then be
able to acquire the lock for itself.
In this basic way, we are able to use semaphores as locks. Because the value
of the semaphore simply alternates between 1 and 0, this usage is sometimes
known as a *binary semaphore*.
Semaphores are also useful when a thread wants to halt its own progress
waiting for something to change. For example, a thread may wish to wait for
a list to become non-empty, so that it can take an element off of the list.
In this pattern of usage, we often find a thread *waiting* for something to
happen, and a different thread making that something happen and
then *signaling* that it has indeed happened, thus waking the waiting
thread. Because the waiting thread (or threads, really) is waiting for some
*condition* in the program to change, we are using the semaphore as a
*condition variable*. We will see condition variables again later,
particularly when covering monitors.
--------------------------------------------------------------------------------
/* Child thread body for the parent-waits-for-child figure: announces
 * itself and returns.  The "signal here" comment marks where a
 * sem_post() is added in the semaphore version of this example. */
void *
child(void *arg) {
printf("child\n");
// signal here: child is done
return NULL;
}
/* Parent: creates the child thread and (without synchronization yet)
 * prints its end message -- the figure's point is that "parent: end"
 * may race with the child's output.
 * Fix: the thread handle must be passed by address so the create call
 * can fill it in; the original passed the uninitialized value `c`. */
int
main(int argc, char *argv[]) {
    printf("parent: begin\n");
    pthread_t c;
    Pthread_create(&c, NULL, child, NULL);
    // wait here for child
    printf("parent: end\n");
    return 0;
}
--------------------------------------------------------------------------------
[FIGURE: PARENT WAITING FOR CHILD]
--------------------------------------------------------------------------------
parent: begin
child
parent: end
--------------------------------------------------------------------------------
[FIGURE: OUTPUT FROM PARENT WAITING FOR CHILD]
The question, then, is how to use a semaphore to achieve this effect, and as
it turns out, it is quite simple, as we see here:
--------------------------------------------------------------------------------
sem_t s;
/* Child thread body (semaphore version): announces itself, then posts
 * the semaphore `s` so the waiting parent can proceed. */
void *
child(void *arg) {
printf("child\n");
// signal here: child is done
sem_post(&s);
return NULL;
}
/* Parent (semaphore version): initializes `s`, creates the child, and
 * blocks in sem_wait() until the child posts -- guaranteeing "child"
 * prints before "parent: end".  The X placeholder is the figure's
 * deliberate question (the correct answer is 0).
 * Fix: the thread handle must be passed by address; the original
 * passed the uninitialized value `c`. */
int
main(int argc, char *argv[]) {
    sem_init(&s, 0, X); // what should X be?
    printf("parent: begin\n");
    pthread_t c;
    Pthread_create(&c, NULL, child, NULL);
    // wait here for child
    sem_wait(&s);
    printf("parent: end\n");
    return 0;
}
--------------------------------------------------------------------------------
[FIGURE: PARENT WAITING FOR CHILD WITH A SEMAPHORE]
As you can see in the code, the parent simply calls sem_wait() and the child
sem_post() to wait for the condition of the child finishing its execution to
become true. However, this raises the question: what should the initial value
of this semaphore be? (think about it here, instead of reading ahead)
The answer, of course, is that the initial value of the semaphore should be
0. There are two cases to consider. First, let us assume that the
parent creates the child but the child has not run yet (i.e., it is sitting in
a ready queue but not running). In this case, the parent will call sem_wait()
before the child has called sem_post(), and thus we'd like the parent to wait
for the child to run. The only way this will happen is if the value of the
semaphore is not greater than 0; hence, 0 as the initial value makes
sense. When the child finally runs, it will call sem_post(), incrementing the
value to 1 and waking the parent, which will then return from sem_wait() and
complete the program.
The second case occurs when the child runs to completion before the parent
gets a chance to call sem_wait(). In this case, the child will first call
sem_post(), thus incrementing the value of the semaphore from 0 to 1. When the
parent then gets a chance to run, it will call sem_wait() and find the value
of the semaphore to be 1; the parent will thus decrement the value and return
from sem_wait() without waiting, also achieving the desired effect.
This arrangement occurs in many places within real systems. For example, in a
multithread web server, a producer puts HTTP requests into a work queue (i.e.,
the bounded buffer); a thread pool of consumers each take a request out of the
work queue and process the request. Similarly, when you use a piped command in
a UNIX shell, as follows:
This example runs two processes concurrently; "cat notes.txt" writes the body of
the file "notes.txt" to what it thinks is standard output; instead, however,
the UNIX shell has redirected the output to what is called a UNIX pipe
(created by the *pipe()* system call). The other end of this pipe is connected
to the standard input of the process "wc", which simply counts the number of
lines in the input stream and prints out the result. Thus, the "cat" process
is the producer, and the "wc" process is the consumer. Between them is a
bounded buffer.
--------------------------------------------------------------------------------
int buffer[MAX];
int fill = 0;
int use = 0;
/* Dequeue: return the oldest value in buffer[] and advance the read
 * index, wrapping modulo MAX.  No locking here -- callers synchronize. */
int get() {
    int item = buffer[use];        // line G1
    use = (use + 1) % MAX;         // line G2
    return item;
}
--------------------------------------------------------------------------------
[FIGURE: THE PUT AND GET ROUTINES]
In this example, we assume that the shared buffer *buffer* is just an array of
integers (this could easily be generalized to arbitrary objects, of course),
and that the *fill* and *use* integers are used as indices into the array,
and are used to track where to both put data (fill) and get data (use).
Let us assume in this simple example that we have just two threads, a producer
and a consumer, and that the producer just writes some number of integers into
the buffer which the consumer removes from the buffer and prints:
--------------------------------------------------------------------------------
void *producer(void *arg) {
int i;
for (i = 0; i < loops; i++) {
put(i);
}
}
Finally, here is the main body of the program, which simply creates the
two threads and waits for them to finish.
--------------------------------------------------------------------------------
int loops = 0;
If the program is run with loops = 5, what we'd like to get is the producer
"producing" 0, 1, 2, 3, and 4, and the consumer printing them in that
order. However, without synchronization, we may not get that. For example,
imagine if the consumer thread runs first; it will call get() to get data that
hasn't even been produced yet, and thus not function as desired. Things get
worse when you add multiple producers or consumers, as there could be race
conditions in the update of the use or fill indices. Clearly, something is
missing.
Our first attempt at solving the problem introduces two semaphores, *empty*
and *full*, which the threads will use to indicate when a buffer entry has
been emptied or filled, respectively. Here is the example code:
--------------------------------------------------------------------------------
sem_t empty;
sem_t full;
In this example, the producer first waits for a buffer to become empty in
order to put data into it, and the consumer similarly waits for a buffer to
become filled before using it. Let us first imagine that MAX=1 (there is only
one buffer in the array), and see if this works.
Imagine again there are two threads, a producer and a consumer. Let us examine
a specific scenario on a single CPU. Assume the consumer gets to run
first. Thus, the consumer will hit line C1 in the figure above, calling
sem_wait(&full). Because full was initialized to the value 0, the call will
block the consumer and wait for another thread to call sem_post() on the
semaphore, as desired.
Let's say the producer then runs. It will hit line P1, calling
sem_wait(&empty). Unlike the consumer, the producer will continue through
this line, because empty was initialized to the value MAX (in this case,
1). Thus, empty will be decremented to 0 and the producer will put a data
value into the first entry of buffer (line P2). The producer will then
continue on to P3 and call sem_post(&full), changing the value of the full
semaphore from 0 to 1 and waking the consumer (e.g., move it from blocked to
ready).
In this case, one of two things could happen. If the producer continues to
run, it will loop around and hit line P1 again. This time, however, it would
block, as the empty semaphore's value is 0. If the producer instead was
interrupted and the consumer began to run, it would call sem_wait(&full)
(line C1) and find that the buffer was indeed full and thus consume it.
In either case, we achieve the desired behavior.
You can try this same example with more threads (e.g., multiple producers, and
multiple consumers). It should still work, or it is time to go to sleep.
Let us now imagine that MAX > 1 (say MAX = 10). For this example, let us
assume that there are multiple producers and multiple consumers. We now have a
problem: a *race condition*. Do you see where it occurs? (take some time and
look for it)
If you can't see it, here's a hint: look more closely at the put() and get()
code.
If you still can't see it, I bet you aren't trying. Come on, spend a minute on
it at least.
OK, you win. Imagine two producers both calling into put() at roughly the same
time. Assume producer 1 gets to run first, and just starts to fill the first
buffer entry (fill = 0 @ line F1). Before the producer gets a chance to
increment the fill counter to 1, it is interrupted. Producer 2 starts to run,
and at line F1 it also puts its data into the 0th element of buffer, which
means that the old data there is overwritten! This is a no-no; we don't want
any data generated by a producer to be lost.
As you can see, what we've forgotten here is *mutual exclusion*. The filling
of a buffer and incrementing of the index into the buffer is a *critical
section*, and thus must be guarded carefully. So let's use our friend the
binary semaphore and add some locks. Here is our first try:
--------------------------------------------------------------------------------
sem_t empty;
sem_t full;
sem_t mutex;
Now we've added some locks around the entire put()/get() parts of the code, as
indicated by the NEW LINE comments. That seems like the right idea, but it
also doesn't work. Why? Deadlock
Why does deadlock occur? Take a moment to consider it; try to find a case
where deadlock arises; what sequence of steps must happen for the program to
deadlock?
OK, now that you figured it out, here is the answer. Imagine two threads, one
producer and one consumer. The consumer gets to run first. It acquires the
mutex (line C0), and then calls sem_wait() on the full semaphore (line C1);
because there is no data yet, this call causes the consumer to block and thus
yield the CPU; importantly, though, the consumer still *holds* the lock.
A producer then runs. It has data to produce and if it were able to run, it
would be able to wake the consumer thread and all would be
good. Unfortunately, the first thing it does is call sem_wait on the binary
mutex semaphore (line P0). The lock is already held. Hence, the producer is
now stuck waiting too.
There is a simple cycle here. The consumer *holds* the mutex and is *waiting*
for the someone to signal full. The producer could *signal* full but is
*waiting* for the mutex. Thus, the producer and consumer are each stuck
waiting for each other: a classic deadlock.
--------------------------------------------------------------------------------
sem_t empty;
sem_t full;
sem_t mutex;
As you can see, we simply move the mutex acquire and release to be just around
the critical section; the full and empty wait and signal code is left outside.
The result is a simple and working bounded buffer, a commonly-used pattern in
multithreaded programs. Understand it now; use it later. You will thank me for
years to come. Or not.
[A READER-WRITER LOCK]
Another classic problem stems from the desire for a more flexible locking
primitive that admits that different data structure accesses might require
different kinds of locking. For example, imagine a number of concurrent list
operations, including inserts and simple lookups. While inserts change the
state of the list (and thus a traditional critical section makes sense),
lookups simply *read* the data structure; as long as we can guarantee that no
insert is on-going, we can allow many lookups to proceed concurrently. The
special type of lock we will now develop to support this type of operation is
known as a *reader-writer lock*. The code for such a lock is available here:
--------------------------------------------------------------------------------
/* Reader-writer lock built from semaphores.
 * writelock -- held by a writer, or by the first reader on behalf of
 *              all concurrent readers
 * lock      -- binary semaphore protecting the `readers` count
 * readers   -- number of readers currently inside the lock */
typedef struct _rwlock_t {
sem_t writelock;
sem_t lock;
int readers;
} rwlock_t;
The code is pretty simple. If some thread wants to update the data structure
in question, it should call the pair of operations rwlock_acquire_writelock()
and rwlock_release_writelock(). Internally, these simply use the "writelock"
semaphore to ensure that only a single writer can acquire the lock and thus
enter the critical section to update the data structure in question.
Thus, once a reader has acquired a read lock, more readers will be allowed to
acquire the read lock too; however, any thread that wishes to acquire the
write lock will have to wait until *all* readers are finished; the last one to
exit the critical section will call sem_post() on "writelock" and thus enable
a waiting writer to acquire the lock itself.
This approach works (as desired), but does have some negatives, especially
when it comes to fairness. In particular, it would be relatively easy for
readers to starve writers. More sophisticated solutions to this problem exist;
perhaps you can think of a better implementation? Hint: think about what you
would need to do to prevent more readers from entering the lock once a writer
is waiting.
Finally, it should be noted that reader-writer locks should be used with some
caution. They often add more overhead (especially with more sophisticated
implementations), and thus do not end up speeding up performance as compared
to just using simple and fast locking primitives [7]. Either way, they
showcase once again how we can use semaphores in an interesting and useful
way.
If I weren't lazy, I would write a bit about one of the most famous
concurrency problems posed and solved by Dijkstra, known as the *dining
philosopher's problem* [9]. However, I am lazy. The problem is famous because
it is fun and somewhat intellectually interesting; however, its practical
utility is low. I am a practical kind of guy, and thus have a hard time
motivating the time spent to understand something that is so clearly
academic. Look it up on your own if you are interested.
[SUMMARY]
In this note, we have presented just a few classic problems and solutions. If
you are interested in finding out more, there are many other materials you can
reference. One great (and free reference) is Allen Downey's book on
concurrency [6]. This book has lots of puzzles you can work on to improve your
understanding of both semaphores in specific and concurrency in general.
Becoming a real concurrency expert takes years of effort; going beyond what
you learn in class is a key component to mastering such a topic.
[REFERENCES]
[4] Historically, sem_wait() was first called P() by Dijkstra (from the Dutch
"proberen", to test) and sem_post() was called V() (from the Dutch "verhogen", to increment).
#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>
char n[1024];
sem_t len;
/* Reader thread: repeatedly reads a whitespace-delimited token into the
 * global buffer n and posts the `len` semaphore so the writer may print it.
 * Fix: the original scanf("%s", n) had no field width, allowing input
 * longer than 1023 characters to overflow n (declared char n[1024]);
 * bound the conversion with %1023s. */
void * read1()
{
    while(1){
        printf("Enter a string");
        scanf("%1023s",n);
        sem_post(&len);
    }
}
/* Writer thread: blocks on the `len` semaphore until the reader has
 * posted a new string, then prints the shared buffer n.
 * Fix: the original text was missing the function's closing brace,
 * so the file did not compile. */
void * write1()
{
    while(1){
        sem_wait(&len);
        printf("The string entered is :");
        printf("==== %s\n",n);
    }
}
int main()
{
int status;
pthread_t tr, tw;
pthread_create(&tr,NULL,read1,NULL);
pthread_create(&tw,NULL,write1,NULL);
pthread_join(tr,NULL);
pthread_join(tw,NULL);
return 0;
}
On running, in most cases we may be able to achieve a serial read and write (Thread1 reads a string and Thread2
displays the same string). But suppose we insert a sleep() call in write1 like
void * write1()
{
while(1){
sleep(5);
sem_wait(&len);
printf("The string entered is :");
printf("==== %s\n",n);
}
}
The thread 1 may read one more string and thread2 displays the last read string. That is no serial read and write
is achieved.
So we may need to use the condition variables to achieve serial read and write.
Example 3
This example involves a reader and a writer thread. The reader thread reads a string from the user and writer
thread displays it. This program uses condition variables to achieve synchronization and achieve serial
programming.
#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>
#define TRUE 1
#define FALSE 0
/* Shared line buffer: read1() fills it, write1() prints it. */
char n[1024];
/* Protects string_read and n. */
pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
/* TRUE while an unconsumed string sits in n; guarded by lock. */
int string_read = FALSE;
/* Fix vs. original: the condition variable was declared without an
 * initializer; POSIX requires PTHREAD_COND_INITIALIZER or pthread_cond_init()
 * before use. */
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
/*
 * Reader thread: waits (under the mutex) until the previous string has been
 * consumed, reads a new one into the shared buffer, marks it available, and
 * signals the writer.
 * Fixes vs. original: the original spun on `while(string_read);` WITHOUT the
 * lock — an unsynchronized busy-wait and a data race on string_read. Both
 * threads now block on the condition variable; with only one reader and one
 * writer, each signal wakes the other thread. Signatures also fixed to the
 * void *(void *) form pthread_create expects.
 */
void *read1(void *arg)
{
    (void)arg;
    while (1) {
        pthread_mutex_lock(&lock);
        while (string_read)                 /* wait until the writer consumed the last string */
            pthread_cond_wait(&cond, &lock);
        printf("Enter a string: ");
        fflush(stdout);                     /* prompt has no '\n'; push it out before blocking */
        if (scanf("%1023s", n) != 1) {      /* bounded read; stop cleanly on EOF */
            pthread_mutex_unlock(&lock);
            break;
        }
        string_read = TRUE;
        pthread_cond_signal(&cond);         /* wake the writer */
        pthread_mutex_unlock(&lock);
    }
    return NULL;
}

/*
 * Writer thread: waits until a string is available, prints it, marks the
 * buffer consumed, and signals the reader so it can accept the next string
 * (the original never signaled here, which the busy-wait papered over).
 */
void *write1(void *arg)
{
    (void)arg;
    while (1) {
        pthread_mutex_lock(&lock);
        while (!string_read)
            pthread_cond_wait(&cond, &lock);
        printf("The string entered is %s\n", n);
        string_read = FALSE;
        pthread_cond_signal(&cond);         /* wake the reader */
        pthread_mutex_unlock(&lock);
    }
    return NULL;
}
/*
 * Start the reader and writer threads and wait for them (they run forever).
 * Fixes vs. original: removed the unused `status` variable and checked the
 * pthread_create return values.
 */
int main()
{
    pthread_t tr, tw;

    if (pthread_create(&tr, NULL, read1, NULL) != 0 ||
        pthread_create(&tw, NULL, write1, NULL) != 0) {
        fprintf(stderr, "pthread_create failed\n");
        return EXIT_FAILURE;
    }
    pthread_join(tr, NULL);
    pthread_join(tw, NULL);
    return 0;
}
In the beginning, I started the discussion that threads are used to achieve concurrency. But the above examples
can be easily done with a simple scanf and printf, i.e.,
scanf("%s", n);
printf("%s", n);
But these examples were given to demonstrate semaphores and condition variables. Example 3 can be
further modified to design a reader/writer application; for example, the string_read boolean variable can be
converted to a string_count variable.
Comparing APIs for Solaris Threads and POSIX
Threads
The Solaris threads API and the pthreads API are two solutions to the same problem: build parallelism into
application software. Although each API is complete, you can safely mix Solaris threads functions and pthread
functions in the same program.
The two APIs do not match exactly, however. Solaris threads support functions that are not found in pthreads,
and pthreads include functions that are not supported in the Solaris interface. For those functions that do match,
the associated arguments might not, although the information content is effectively the same.
By combining the two APIs, you can use features not found in one API to enhance the other API. Similarly, you
can run applications that use Solaris threads exclusively with applications that use pthreads exclusively on the
same system.
Solaris threads and pthreads are very similar in both API action and syntax. The major differences are listed in
Table 61 .
Solaris threads: thr_ prefix for thread function names, sema_ prefix for semaphore function names.
pthreads: pthread_ prefix for thread function names, sem_ prefix for semaphore function names.
The following table compares Solaris threads functions with pthreads functions. Note that even when Solaris
threads and pthreads functions appear to be essentially the same, the arguments to the functions can differ.
When a comparable interface is not available either in pthreads or Solaris threads, a hyphen `-' appears in the
column. Entries in the pthreads column that are followed by (3RT) are functions in librt, the POSIX.1b
Realtime Extensions library, which is not part of pthreads. Functions in this library provide most of the
interfaces specified by the POSIX.1b Realtime Extension.
Solaris Threads pthreads
thr_create() pthread_create()
thr_exit() pthread_exit()
thr_join() pthread_join()
thr_yield() sched_yield()(3RT)
thr_self() pthread_self()
thr_kill() pthread_kill()
thr_sigsetmask() pthread_sigmask()
thr_setprio() pthread_setschedparam()
thr_getprio() pthread_getschedparam()
thr_setconcurrency() pthread_setconcurrency()
thr_getconcurrency() pthread_getconcurrency()
thr_suspend() -
thr_continue() -
thr_keycreate() pthread_key_create()
- pthread_key_delete()
thr_setspecific() pthread_setspecific()
thr_getspecific() pthread_getspecific()
Solaris Threads pthreads
- pthread_once()
- pthread_equal()
- pthread_cancel()
- pthread_testcancel()
- pthread_cleanup_push()
- pthread_cleanup_pop()
- pthread_setcanceltype()
- pthread_setcancelstate()
mutex_lock() pthread_mutex_lock()
mutex_unlock() pthread_mutex_unlock()
mutex_trylock() pthread_mutex_trylock()
mutex_init() pthread_mutex_init()
mutex_destroy() pthread_mutex_destroy()
cond_wait() pthread_cond_wait()
cond_timedwait() pthread_cond_timedwait()
cond_reltimedwait() pthread_cond_reltimedwait_np()
Solaris Threads pthreads
cond_signal() pthread_cond_signal()
cond_broadcast() pthread_cond_broadcast()
cond_init() pthread_cond_init()
cond_destroy() pthread_cond_destroy()
rwlock_init() pthread_rwlock_init()
rwlock_destroy() pthread_rwlock_destroy()
rw_rdlock() pthread_rwlock_rdlock()
rw_wrlock() pthread_rwlock_wrlock()
rw_unlock() pthread_rwlock_unlock()
rw_tryrdlock() pthread_rwlock_tryrdlock()
rw_trywrlock() pthread_rwlock_trywrlock()
- pthread_rwlockattr_init()
- pthread_rwlockattr_destroy()
- pthread_rwlockattr_getpshared()
- pthread_rwlockattr_setpshared()
sema_init() sem_init()(3RT)
Solaris Threads pthreads
sema_destroy() sem_destroy()(3RT)
sema_wait() sem_wait()(3RT)
sema_post() sem_post()(3RT)
sema_trywait() sem_trywait()(3RT)
fork1() fork()
- pthread_atfork()
- pthread_mutexattr_init()
- pthread_mutexattr_destroy()
- pthread_mutexattr_getpshared()
- pthread_mutexattr_settype()
- pthread_mutexattr_gettype()
- pthread_condattr_init()
- pthread_condattr_destroy()
- pthread_condattr_getpshared()
- pthread_attr_init()
- pthread_attr_destroy()
- pthread_attr_getscope()
- pthread_attr_setguardsize()
- pthread_attr_getguardsize()
- pthread_attr_getstacksize()
- pthread_attr_getstack()
- pthread_attr_getdetachstate()
- pthread_attr_setschedparam()
- pthread_attr_getschedparam()
- pthread_attr_setinheritsched()
Solaris Threads pthreads
- pthread_attr_getinheritsched()
- pthread_attr_setschedpolicy()
- pthread_attr_getschedpolicy()
To use the Solaris threads functions described in this chapter for Solaris 9 and previous releases, you must link
with the Solaris threads library (-lthread).
Operation is virtually the same for both Solaris threads and for pthreads, even though the function names or
arguments might differ. Only a brief example consisting of the correct include file and the function prototype is
presented. Where return values are not given for the Solaris threads functions, see the appropriate pages in man
pages section 3: Basic Library Functions for the function return values.
For more information on Solaris related functions, see the related pthreads documentation for the similarly
named function.
Where Solaris threads functions offer capabilities that are not available in pthreads, a full description of the
functions is provided