Diffstat (limited to 'pfinet/io-ops.c')
-rw-r--r--  pfinet/io-ops.c | 374
1 file changed, 178 insertions, 196 deletions
diff --git a/pfinet/io-ops.c b/pfinet/io-ops.c
index 72895065..ef8d8513 100644
--- a/pfinet/io-ops.c
+++ b/pfinet/io-ops.c
@@ -1,5 +1,5 @@
-/*
- Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+/*
+ Copyright (C) 1995,96,97,98,99,2000,02 Free Software Foundation, Inc.
Written by Michael I. Bushnell, p/BSG.
This file is part of the GNU Hurd.
@@ -19,78 +19,105 @@
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
#include "pfinet.h"
+
+#include <linux/wait.h>
+#include <linux/socket.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
#include "io_S.h"
#include <netinet/in.h>
-#include <linux/wait.h>
-#include <linux-inet/sock.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <mach/notify.h>
+#include <sys/mman.h>
error_t
S_io_write (struct sock_user *user,
char *data,
- u_int datalen,
+ size_t datalen,
off_t offset,
mach_msg_type_number_t *amount)
{
error_t err;
-
+ struct iovec iov = { data, datalen };
+ struct msghdr m = { msg_name: 0, msg_namelen: 0, msg_flags: 0,
+ msg_controllen: 0, msg_iov: &iov, msg_iovlen: 1 };
+
if (!user)
return EOPNOTSUPP;
- mutex_lock (&global_lock);
+ __mutex_lock (&global_lock);
become_task (user);
- err = (*user->sock->ops->write) (user->sock, data, datalen,
- user->sock->userflags);
- mutex_unlock (&global_lock);
+ if (user->sock->flags & O_NONBLOCK)
+ m.msg_flags |= MSG_DONTWAIT;
+ err = (*user->sock->ops->sendmsg) (user->sock, &m, datalen, 0);
+ __mutex_unlock (&global_lock);
- if (err >= 0)
+ if (err < 0)
+ err = -err;
+ else
{
*amount = err;
err = 0;
}
-
+
return err;
}
error_t
S_io_read (struct sock_user *user,
char **data,
- u_int *datalen,
+ size_t *datalen,
off_t offset,
mach_msg_type_number_t amount)
{
error_t err;
int alloced = 0;
+ struct iovec iov;
+ struct msghdr m = { msg_name: 0, msg_namelen: 0, msg_flags: 0,
+ msg_controllen: 0, msg_iov: &iov, msg_iovlen: 1 };
if (!user)
return EOPNOTSUPP;
-
+
/* Instead of this, we should peek and the socket and only
allocate as much as necessary. */
if (amount > *datalen)
{
- vm_allocate (mach_task_self (), (vm_address_t *)data, amount, 1);
+ *data = mmap (0, amount, PROT_READ|PROT_WRITE, MAP_ANON, 0, 0);
+ if (*data == MAP_FAILED)
+ /* Should check whether errno is indeed ENOMEM --
+ but this can't be done in a straightforward way,
+ because the glue headers #undef errno. */
+ return ENOMEM;
alloced = 1;
}
-
- mutex_lock (&global_lock);
+
+ iov.iov_base = *data;
+ iov.iov_len = amount;
+
+ __mutex_lock (&global_lock);
become_task (user);
- err = (*user->sock->ops->read) (user->sock, *data, amount,
- user->sock->userflags);
- mutex_unlock (&global_lock);
-
+ err = (*user->sock->ops->recvmsg) (user->sock, &m, amount,
+ ((user->sock->flags & O_NONBLOCK)
+ ? MSG_DONTWAIT : 0),
+ 0);
+ __mutex_unlock (&global_lock);
+
if (err < 0)
- err = -err;
+ {
+ err = -err;
+ if (alloced)
+ munmap (*data, amount);
+ }
else
{
*datalen = err;
if (alloced && round_page (*datalen) < round_page (amount))
- vm_deallocate (mach_task_self (),
- (vm_address_t) *data + round_page (*datalen),
- round_page (amount) - round_page (*datalen));
+ munmap (*data + round_page (*datalen),
+ round_page (amount) - round_page (*datalen));
err = 0;
}
return err;
@@ -111,19 +138,19 @@ S_io_readable (struct sock_user *user,
{
struct sock *sk;
error_t err;
-
+
if (!user)
return EOPNOTSUPP;
-
- mutex_lock (&global_lock);
+
+ __mutex_lock (&global_lock);
become_task (user);
-
+
/* We need to avoid calling the Linux ioctl routines,
so here is a rather ugly break of modularity. */
- sk = (struct sock *) user->sock->data;
+ sk = user->sock->sk;
err = 0;
-
+
/* Linux's af_inet.c ioctl routine just calls the protocol-specific
ioctl routine; it's those routines that we need to simulate. So
this switch corresponds to the initialization of SK->prot in
@@ -132,17 +159,9 @@ S_io_readable (struct sock_user *user,
{
case SOCK_STREAM:
case SOCK_SEQPACKET:
- /* These guts are copied from tcp.c:tcp_ioctl. */
- if (sk->state == TCP_LISTEN)
- err = EINVAL;
- else
- {
- sk->inuse = 1;
- *amount = tcp_readable (sk);
- release_sock (sk);
- }
+ err = tcp_tiocinq (sk, amount);
break;
-
+
case SOCK_DGRAM:
/* These guts are copied from udp.c:udp_ioctl (TIOCINQ). */
if (sk->state == TCP_LISTEN)
@@ -152,14 +171,14 @@ S_io_readable (struct sock_user *user,
*amount = (skb_peek (&sk->receive_queue)
? : &((struct sk_buff){}))->len;
break;
-
+
case SOCK_RAW:
default:
err = EOPNOTSUPP;
break;
}
- mutex_unlock (&global_lock);
+ __mutex_unlock (&global_lock);
return err;
}
@@ -169,13 +188,13 @@ S_io_set_all_openmodes (struct sock_user *user,
{
if (!user)
return EOPNOTSUPP;
-
- mutex_lock (&global_lock);
+
+ __mutex_lock (&global_lock);
if (bits & O_NONBLOCK)
- user->sock->userflags |= O_NONBLOCK;
+ user->sock->flags |= O_NONBLOCK;
else
- user->sock->userflags &= ~O_NONBLOCK;
- mutex_unlock (&global_lock);
+ user->sock->flags &= ~O_NONBLOCK;
+ __mutex_unlock (&global_lock);
return 0;
}
@@ -184,22 +203,22 @@ S_io_get_openmodes (struct sock_user *user,
int *bits)
{
struct sock *sk;
-
+
if (!user)
return EOPNOTSUPP;
-
- mutex_lock (&global_lock);
- sk = user->sock->data;
-
+
+ __mutex_lock (&global_lock);
+ sk = user->sock->sk;
+
*bits = 0;
if (!(sk->shutdown & SEND_SHUTDOWN))
*bits |= O_WRITE;
if (!(sk->shutdown & RCV_SHUTDOWN))
*bits |= O_READ;
- if (user->sock->userflags & O_NONBLOCK)
+ if (user->sock->flags & O_NONBLOCK)
*bits |= O_NONBLOCK;
-
- mutex_unlock (&global_lock);
+
+ __mutex_unlock (&global_lock);
return 0;
}
@@ -209,11 +228,11 @@ S_io_set_some_openmodes (struct sock_user *user,
{
if (!user)
return EOPNOTSUPP;
-
- mutex_lock (&global_lock);
+
+ __mutex_lock (&global_lock);
if (bits & O_NONBLOCK)
- user->sock->userflags |= O_NONBLOCK;
- mutex_unlock (&global_lock);
+ user->sock->flags |= O_NONBLOCK;
+ __mutex_unlock (&global_lock);
return 0;
}
@@ -223,114 +242,63 @@ S_io_clear_some_openmodes (struct sock_user *user,
{
if (!user)
return EOPNOTSUPP;
-
- mutex_lock (&global_lock);
+
+ __mutex_lock (&global_lock);
if (bits & O_NONBLOCK)
- user->sock->userflags &= ~O_NONBLOCK;
- mutex_unlock (&global_lock);
+ user->sock->flags &= ~O_NONBLOCK;
+ __mutex_unlock (&global_lock);
return 0;
}
error_t
S_io_select (struct sock_user *user,
- mach_port_t reply, mach_msg_type_name_t reply_type,
+ mach_port_t reply,
+ mach_msg_type_name_t reply_type,
int *select_type)
{
- int avail = 0;
- int cancel = 0;
- int requested_notify = 0;
- select_table table;
- struct select_table_elt *elt, *nxt;
+ const int want = *select_type;
+ int avail;
if (!user)
return EOPNOTSUPP;
- mutex_lock (&global_lock);
+ __mutex_lock (&global_lock);
become_task (user);
- /* In Linux, this means (supposedly) that I/O will never be possible.
+ /* In Linux, this means (supposedly) that I/O will never be possible.
That's a lose, so prevent it from happening. */
- assert (user->sock->ops->select);
+ assert (user->sock->ops->poll);
- /* The select function returns one if the specified I/O type is
- immediately possible. If it returns zero, then it is not
- immediately possible, and it has called select_wait. Eventually
- it will wakeup the wait queue specified in the select_wait call;
- at that point we should retry the call. */
-
- for (;;)
+ avail = (*user->sock->ops->poll) ((void *) 0xdeadbeef,
+ user->sock,
+ (void *) 0xdeadbead);
+ if ((avail & want) == 0)
{
- condition_init (&table.master_condition);
- table.head = 0;
-
- if (*select_type & SELECT_READ)
- avail |= ((*user->sock->ops->select) (user->sock, SEL_IN, &table)
- ? SELECT_READ : 0);
- if (*select_type & SELECT_WRITE)
- avail |= ((*user->sock->ops->select) (user->sock, SEL_OUT, &table)
- ? SELECT_WRITE : 0);
- if (*select_type & SELECT_URG)
- avail |= ((*user->sock->ops->select) (user->sock, SEL_EX, &table)
- ? SELECT_URG : 0);
-
- if (!avail)
+ ports_interrupt_self_on_notification (user, reply,
+ MACH_NOTIFY_DEAD_NAME);
+
+ do
{
- if (! requested_notify)
+ /* Block until we are woken or cancelled. */
+ interruptible_sleep_on (user->sock->sk->sleep);
+ if (signal_pending (current)) /* This means we were cancelled. */
{
- ports_interrupt_self_on_notification (user, reply,
- MACH_NOTIFY_DEAD_NAME);
- requested_notify = 1;
+ __mutex_unlock (&global_lock);
+ return EINTR;
}
- cancel = hurd_condition_wait (&table.master_condition, &global_lock);
- }
-
- /* Drop the conditions implications and structures allocated in the
- select table. */
- for (elt = table.head; elt; elt = nxt)
- {
- condition_unimplies (elt->dependent_condition,
- &table.master_condition);
- nxt = elt->next;
- free (elt);
- }
-
- if (avail)
- {
- mutex_unlock (&global_lock);
- *select_type = avail;
- return 0;
- }
-
- if (cancel)
- {
- mutex_unlock (&global_lock);
- return EINTR;
+ avail = (*user->sock->ops->poll) ((void *) 0xdeadbeef,
+ user->sock,
+ (void *) 0xdeadbead);
}
+ while ((avail & want) == 0);
}
-}
-/* Establish that the condition in WAIT_ADDRESS should imply
- the condition in P. Also, add us to the queue in P so
- that the relation can be undone at the proper time. */
-void
-select_wait (struct wait_queue **wait_address, select_table *p)
-{
- struct select_table_elt *elt;
-
- /* tcp.c happens to use an uninitalized wait queue;
- so this special hack is for that. */
- if (*wait_address == 0)
- {
- *wait_address = malloc (sizeof (struct wait_queue));
- condition_init (&(*wait_address)->c);
- }
+ /* We got something. */
+ *select_type = avail;
- elt = malloc (sizeof (struct select_table_elt));
- elt->dependent_condition = &(*wait_address)->c;
- elt->next = p->head;
- p->head = elt;
+ __mutex_unlock (&global_lock);
- condition_implies (elt->dependent_condition, &p->master_condition);
+ return 0;
}
error_t
@@ -339,14 +307,16 @@ S_io_stat (struct sock_user *user,
{
if (!user)
return EOPNOTSUPP;
-
+
bzero (st, sizeof (struct stat));
-
+
st->st_fstype = FSTYPE_SOCKET;
st->st_fsid = getpid ();
- st->st_ino = (ino_t) user->sock; /* why not? */
-
+ st->st_ino = user->sock->st_ino;
+
+ st->st_mode = S_IFSOCK | ACCESSPERMS;
st->st_blksize = 512; /* ???? */
+
return 0;
}
@@ -357,36 +327,34 @@ S_io_reauthenticate (struct sock_user *user,
struct sock_user *newuser;
uid_t gubuf[20], ggbuf[20], aubuf[20], agbuf[20];
uid_t *gen_uids, *gen_gids, *aux_uids, *aux_gids;
- u_int genuidlen, gengidlen, auxuidlen, auxgidlen;
+ size_t genuidlen, gengidlen, auxuidlen, auxgidlen;
error_t err;
- int i;
+ size_t i, j;
auth_t auth;
mach_port_t newright;
if (!user)
return EOPNOTSUPP;
-
+
genuidlen = gengidlen = auxuidlen = auxgidlen = 20;
gen_uids = gubuf;
gen_gids = ggbuf;
aux_uids = aubuf;
aux_gids = agbuf;
- mutex_lock (&global_lock);
- newuser = make_sock_user (user->sock, 0, 1);
-
+ __mutex_lock (&global_lock);
+ newuser = make_sock_user (user->sock, 0, 1, 0);
+
auth = getauth ();
- newright = ports_get_right (newuser);
- err = mach_port_insert_right (mach_task_self (), newright, newright,
- MACH_MSG_TYPE_MAKE_SEND);
- assert_perror (err);
+ newright = ports_get_send_right (newuser);
+ assert (newright != MACH_PORT_NULL);
do
- err = auth_server_authenticate (auth,
+ err = auth_server_authenticate (auth,
rend,
MACH_MSG_TYPE_COPY_SEND,
newright,
MACH_MSG_TYPE_COPY_SEND,
- &gen_uids, &genuidlen,
+ &gen_uids, &genuidlen,
&aux_uids, &auxuidlen,
&gen_gids, &gengidlen,
&aux_gids, &auxgidlen);
@@ -398,29 +366,32 @@ S_io_reauthenticate (struct sock_user *user,
if (err)
newuser->isroot = 0;
else
+ /* Check permission as fshelp_isowner would do. */
for (i = 0; i < genuidlen; i++)
- if (gen_uids[i] == 0)
- newuser->isroot = 1;
+ {
+ if (gen_uids[i] == 0 || gen_uids[i] == pfinet_owner)
+ newuser->isroot = 1;
+ if (gen_uids[i] == pfinet_group)
+ for (j = 0; j < gengidlen; j++)
+ if (gen_gids[j] == pfinet_group)
+ newuser->isroot = 1;
+ }
mach_port_move_member (mach_task_self (), newuser->pi.port_right,
pfinet_bucket->portset);
- mutex_unlock (&global_lock);
+ __mutex_unlock (&global_lock);
ports_port_deref (newuser);
if (gubuf != gen_uids)
- vm_deallocate (mach_task_self (), (u_int) gen_uids,
- genuidlen * sizeof (uid_t));
+ munmap (gen_uids, genuidlen * sizeof (uid_t));
if (ggbuf != gen_gids)
- vm_deallocate (mach_task_self (), (u_int) gen_gids,
- gengidlen * sizeof (uid_t));
+ munmap (gen_gids, gengidlen * sizeof (uid_t));
if (aubuf != aux_uids)
- vm_deallocate (mach_task_self (), (u_int) aux_uids,
- auxuidlen * sizeof (uid_t));
+ munmap (aux_uids, auxuidlen * sizeof (uid_t));
if (agbuf != aux_gids)
- vm_deallocate (mach_task_self (), (u_int) aux_gids,
- auxgidlen * sizeof (uid_t));
+ munmap (aux_gids, auxgidlen * sizeof (uid_t));
return 0;
}
@@ -429,31 +400,36 @@ error_t
S_io_restrict_auth (struct sock_user *user,
mach_port_t *newobject,
mach_msg_type_name_t *newobject_type,
- uid_t *uids,
- u_int uidslen,
- uid_t *gids,
- u_int gidslen)
+ uid_t *uids, size_t uidslen,
+ uid_t *gids, size_t gidslen)
{
struct sock_user *newuser;
- int i = 0;
+ int i, j;
int isroot;
if (!user)
return EOPNOTSUPP;
- mutex_lock (&global_lock);
+ __mutex_lock (&global_lock);
isroot = 0;
if (user->isroot)
- for (i = 0; i < uidslen && !isroot; i++)
- if (uids[i] == 0)
- isroot = 1;
-
- newuser = make_sock_user (user->sock, isroot, 0);
+ /* Check permission as fshelp_isowner would do. */
+ for (i = 0; i < uidslen; i++)
+ {
+ if (uids[i] == 0 || uids[i] == pfinet_owner)
+ isroot = 1;
+ if (uids[i] == pfinet_group)
+ for (j = 0; j < gidslen; j++)
+ if (gids[j] == pfinet_group)
+ isroot = 1;
+ }
+
+ newuser = make_sock_user (user->sock, isroot, 0, 0);
*newobject = ports_get_right (newuser);
*newobject_type = MACH_MSG_TYPE_MAKE_SEND;
ports_port_deref (newuser);
- mutex_unlock (&global_lock);
+ __mutex_unlock (&global_lock);
return 0;
}
@@ -465,13 +441,13 @@ S_io_duplicate (struct sock_user *user,
struct sock_user *newuser;
if (!user)
return EOPNOTSUPP;
-
- mutex_lock (&global_lock);
- newuser = make_sock_user (user->sock, user->isroot, 0);
+
+ __mutex_lock (&global_lock);
+ newuser = make_sock_user (user->sock, user->isroot, 0, 0);
*newobject = ports_get_right (newuser);
*newobject_type = MACH_MSG_TYPE_MAKE_SEND;
ports_port_deref (newuser);
- mutex_unlock (&global_lock);
+ __mutex_unlock (&global_lock);
return 0;
}
@@ -481,21 +457,21 @@ S_io_identity (struct sock_user *user,
mach_msg_type_name_t *idtype,
mach_port_t *fsys,
mach_msg_type_name_t *fsystype,
- int *fileno)
+ ino_t *fileno)
{
error_t err;
if (!user)
return EOPNOTSUPP;
-
- mutex_lock (&global_lock);
+
+ __mutex_lock (&global_lock);
if (user->sock->identity == MACH_PORT_NULL)
{
err = mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE,
&user->sock->identity);
if (err)
{
- mutex_unlock (&global_lock);
+ __mutex_unlock (&global_lock);
return err;
}
}
@@ -504,12 +480,19 @@ S_io_identity (struct sock_user *user,
*idtype = MACH_MSG_TYPE_MAKE_SEND;
*fsys = fsys_identity;
*fsystype = MACH_MSG_TYPE_MAKE_SEND;
- *fileno = (ino_t) user->sock; /* matches S_io_stat above */
-
- mutex_unlock (&global_lock);
+ *fileno = user->sock->st_ino;
+
+ __mutex_unlock (&global_lock);
return 0;
}
+error_t
+S_io_revoke (struct sock_user *user)
+{
+ /* XXX maybe we should try */
+ return EOPNOTSUPP;
+}
+
error_t
@@ -534,8 +517,8 @@ S_io_get_owner (struct sock_user *user,
{
return EOPNOTSUPP;
}
-
-error_t
+
+error_t
S_io_get_icky_async_id (struct sock_user *user,
mach_port_t *id,
mach_msg_type_name_t *idtype)
@@ -632,4 +615,3 @@ S_io_sigio (struct sock_user *user)
{
return EOPNOTSUPP;
}
-