-rw-r--r--  .library/IkiWiki/Plugin/field.pm  662
-rw-r--r--  .library/IkiWiki/Plugin/getfield.pm  126
-rw-r--r--  .library/IkiWiki/Plugin/ymlfront.pm  426
-rw-r--r--  .templates/page.tmpl  3
-rw-r--r--  community/meetings.mdwn  3
-rw-r--r--  community/meetings/debconf10.mdwn  20
-rw-r--r--  community/meetings/ghm2010.mdwn  20
-rw-r--r--  community/weblogs.mdwn  9
-rw-r--r--  community/weblogs/ArneBab/Hurd-showcase-qemu-image.mdwn  106
-rw-r--r--  community/weblogs/ArneBab/what_we_need.mdwn  39
-rw-r--r--  community/weblogs/antrik.mdwn  15
-rw-r--r--  community/weblogs/hook.mdwn  25
-rw-r--r--  community/weblogs/tschwinge.mdwn  15
-rw-r--r--  documentation.mdwn  12
-rw-r--r--  faq.mdwn  28
-rw-r--r--  faq/sharing_the_user_space.mdwn  23
-rw-r--r--  hurd/faq.mdwn  4
-rw-r--r--  hurd/faq/how_about_drivers.mdwn  17
-rw-r--r--  hurd/faq/how_to_switch_microkernels.mdwn  15
-rw-r--r--  hurd/faq/which_microkernel.mdwn  19
-rw-r--r--  hurd/running/arch_hurd.mdwn  16
-rw-r--r--  hurd/running/debian/faq.mdwn  6
-rw-r--r--  hurd/running/faq.mdwn  4
-rw-r--r--  hurd/translator.mdwn  6
-rw-r--r--  hurd/translator/examples.mdwn  6
-rw-r--r--  hurd/translator/ext2fs.mdwn  13
-rw-r--r--  hurd/translator/ext2fs/large_stores.txt  510
-rw-r--r--  hurd/translator/ext2fs/ogi-fosdem2005.mgp  165
-rw-r--r--  hurd/translator/gopherfs.mdwn  16
-rw-r--r--  hurd/translator/netio.mdwn  17
-rw-r--r--  hurd/translator/tarfs.mdwn  25
-rw-r--r--  ikiwiki.setup  87
-rw-r--r--  media_appearances.mdwn  7
-rw-r--r--  microkernel/faq.mdwn  5
-rw-r--r--  open_issues/bash_busy-loop.mdwn  33
-rw-r--r--  open_issues/crashes_vs_system_load_cpu_load_rpc_load.mdwn  17
-rw-r--r--  open_issues/error_message_disk_full.mdwn  14
-rw-r--r--  open_issues/glibc___libc_alloca_cutoff_should_be_lowered.mdwn  19
-rw-r--r--  open_issues/hurdextras.mdwn  100
-rw-r--r--  open_issues/libgomp_pthread_attr_setstacksize_pthread_stack_min.mdwn  17
-rw-r--r--  open_issues/libmachuser_libhurduser_rpc_stubs.mdwn  26
-rw-r--r--  open_issues/nice_vs_mach_thread_priorities.mdwn  4
-rw-r--r--  open_issues/nptl.mdwn  10
-rw-r--r--  open_issues/ogi.mdwn  25
-rw-r--r--  open_issues/phython.mdwn  13
-rw-r--r--  open_issues/subhurd_error_messages.mdwn  15
-rw-r--r--  open_issues/system_crash_nmap.mdwn  15
-rw-r--r--  open_issues/system_crash_pflocal_fifo.mdwn  41
-rw-r--r--  open_issues/thread-cancel_c_55_hurd_thread_cancel_assertion___spin_lock_locked_ss_critical_section_lock.mdwn  41
-rw-r--r--  open_issues/unit_testing.mdwn  43
-rw-r--r--  public_hurd_boxen/installation/snubber.mdwn  11
-rw-r--r--  sidebar.mdwn  1
-rw-r--r--  user/jkoenig.mdwn  40
53 files changed, 2891 insertions, 64 deletions
diff --git a/.library/IkiWiki/Plugin/field.pm b/.library/IkiWiki/Plugin/field.pm
new file mode 100644
index 00000000..e53474e9
--- /dev/null
+++ b/.library/IkiWiki/Plugin/field.pm
@@ -0,0 +1,662 @@
+#!/usr/bin/perl
+# Ikiwiki field plugin.
+# See doc/plugin/contrib/field.mdwn for documentation.
+package IkiWiki::Plugin::field;
+use warnings;
+use strict;
+=head1 NAME
+
+IkiWiki::Plugin::field - front-end for per-page record fields.
+
+=head1 VERSION
+
+This describes version B<0.05> of IkiWiki::Plugin::field
+
+=cut
+
+our $VERSION = '0.05';
+
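+=head1 SYNOPSIS
+
+A sketch of typical use in an ikiwiki setup file; the option names match
+the getsetup declarations below, but the values are examples only.
+
+    add_plugins => [qw{field}],
+    field_register => {meta => 'last'},
+    field_allow_config => 1,
+    field_tags => {BookAuthor => '/books/authors'},
+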
+=head1 PREREQUISITES
+
+ IkiWiki
+
+=head1 AUTHOR
+
+ Kathryn Andersen (RUBYKAT)
+ http://github.com/rubykat
+
+=head1 COPYRIGHT
+
+Copyright (c) 2009-2010 Kathryn Andersen
+
+This program is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself.
+
+=cut
+
+use IkiWiki 3.00;
+
+my %Fields = (
+ _first => {
+ id => '_first',
+ seq => 'BB',
+ },
+ _last => {
+ id => '_last',
+ seq => 'YY',
+ },
+ _middle => {
+ id => '_middle',
+ seq => 'MM',
+ },
+);
+my @FieldsLookupOrder = ();
+
+my %Cache = ();
+
+sub field_get_value ($$);
+
+sub import {
+ hook(type => "getsetup", id => "field", call => \&getsetup);
+ hook(type => "checkconfig", id => "field", call => \&checkconfig);
+ hook(type => "scan", id => "field", call => \&scan, last=>1);
+ hook(type => "pagetemplate", id => "field", call => \&pagetemplate);
+}
+
+# ===============================================
+# Hooks
+# ---------------------------
+sub getsetup () {
+ return
+ plugin => {
+ safe => 1,
+ rebuild => undef,
+ },
+ field_register => {
+ type => "hash",
+ example => "field_register => {meta => 'last'}",
+ description => "simple registration of fields by plugin",
+ safe => 0,
+ rebuild => undef,
+ },
+ field_allow_config => {
+ type => "boolean",
+ example => "field_allow_config => 1",
+ description => "allow config settings to be queried",
+ safe => 0,
+ rebuild => undef,
+ },
+ field_tags => {
+ type => "hash",
+ example => "field_tags => {BookAuthor => '/books/authors'}",
+ description => "fields flagged as tag-fields",
+ safe => 0,
+ rebuild => undef,
+ },
+}
+
+sub checkconfig () {
+ # use the simple by-plugin pagestatus method for
+ # those plugins registered with the field_register config option.
+ if (defined $config{field_register})
+ {
+ if (ref $config{field_register} eq 'ARRAY')
+ {
+ foreach my $id (@{$config{field_register}})
+ {
+ field_register(id=>$id);
+ }
+ }
+ elsif (ref $config{field_register} eq 'HASH')
+ {
+ foreach my $id (keys %{$config{field_register}})
+ {
+ field_register(id=>$id, order=>$config{field_register}->{$id});
+ }
+ }
+ else
+ {
+ field_register(id=>$config{field_register});
+ }
+ }
+ if (!defined $config{field_allow_config})
+ {
+ $config{field_allow_config} = 0;
+ }
+} # checkconfig
+
+sub scan (@) {
+ my %params=@_;
+ my $page=$params{page};
+ my $content=$params{content};
+
+ # scan for tag fields
+ if ($config{field_tags})
+ {
+ foreach my $field (sort keys %{$config{field_tags}})
+ {
+ my @values = field_get_value($field, $page);
+ if (@values)
+ {
+ foreach my $tag (@values)
+ {
+ if ($tag)
+ {
+ my $link = $config{field_tags}{$field} . '/'
+ . titlepage($tag);
+ add_link($page, $link, lc($field));
+ }
+ }
+ }
+ }
+ }
+} # scan
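+
+# For illustration only (not part of the plugin): with the setup option
+# field_tags => {BookAuthor => '/books/authors'} (example values), a page
+# whose BookAuthor field is "Jane Doe" gets a link to
+# /books/authors/Jane_Doe with link type "bookauthor", as created by
+# scan() above.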
+
+sub pagetemplate (@) {
+ my %params=@_;
+ my $page=$params{page};
+ my $template=$params{template};
+
+ field_set_template_values($template, $page);
+} # pagetemplate
+
+# ===============================================
+# Field interface
+# ---------------------------
+
+sub field_register (%) {
+ my %param=@_;
+ if (!exists $param{id})
+ {
+ error 'field_register requires id parameter';
+ return 0;
+ }
+ if (exists $param{call} and !ref $param{call})
+ {
+ error 'field_register call parameter must be function';
+ return 0;
+ }
+
+ $Fields{$param{id}} = \%param;
+ if (!exists $param{call})
+ {
+ # closure to get the data from the pagestate hash
+ $Fields{$param{id}}->{call} = sub {
+ my $field_name = shift;
+ my $page = shift;
+ if (exists $pagestate{$page}{$param{id}}{$field_name})
+ {
+ return (wantarray
+ ? ($pagestate{$page}{$param{id}}{$field_name})
+ : $pagestate{$page}{$param{id}}{$field_name});
+ }
+ elsif (exists $pagestate{$page}{$param{id}}{lc($field_name)})
+ {
+ return (wantarray
+ ? ($pagestate{$page}{$param{id}}{lc($field_name)})
+ : $pagestate{$page}{$param{id}}{lc($field_name)});
+ }
+ return undef;
+ };
+ }
+ # add this to the ordering hash
+ # first, last, order; by default, middle
+ my $when = ($param{first}
+ ? '_first'
+ : ($param{last}
+ ? '_last'
+ : ($param{order}
+ ? ($param{order} eq 'first'
+ ? '_first'
+ : ($param{order} eq 'last'
+ ? '_last'
+ : ($param{order} eq 'middle'
+ ? '_middle'
+ : $param{order}
+ )
+ )
+ )
+ : '_middle'
+ )
+ ));
+ add_lookup_order($param{id}, $when);
+ return 1;
+} # field_register
+
+sub field_get_value ($$) {
+ my $field_name = shift;
+ my $page = shift;
+
+ # This will return the first value it finds
+ # where the value returned is not undefined.
+ # This will return an array of values if wantarray is true.
+
+ # The reason why it checks every registered plugin rather than have
+ # plugins declare which fields they know about, is that it is quite
+ # possible that a plugin doesn't know, ahead of time, what fields
+ # will be available; for example, a YAML format plugin would return
+ # any field that happens to be defined in a YAML page file, which
+ # could be anything!
+
+ my $value = undef;
+    my @array_value = ();
+
+ # check the cache first
+ if (exists $Cache{$page}{$field_name}
+ and defined $Cache{$page}{$field_name})
+ {
+ return (wantarray
+ ? @{$Cache{$page}{$field_name}{array}}
+ : $Cache{$page}{$field_name}{scalar});
+ }
+
+ if (!@FieldsLookupOrder)
+ {
+ build_fields_lookup_order();
+ }
+ foreach my $id (@FieldsLookupOrder)
+ {
+ $value = $Fields{$id}{call}->($field_name, $page);
+ @array_value = $Fields{$id}{call}->($field_name, $page);
+ if (defined $value)
+ {
+ last;
+ }
+ }
+
+ # extra definitions
+ if (!defined $value)
+ {
+ # Exception for titles
+ # If the title hasn't been found, construct it
+ if ($field_name eq 'title')
+ {
+ $value = pagetitle(IkiWiki::basename($page));
+ }
+ # and set "page" if desired
+ elsif ($field_name eq 'page')
+ {
+ $value = $page;
+ }
+ # the page above this page; aka the current directory
+ elsif ($field_name eq 'parent_page')
+ {
+ if ($page =~ m{^(.*)/[-\.\w]+$})
+ {
+ $value = $1;
+ }
+ }
+ elsif ($field_name eq 'basename')
+ {
+ $value = IkiWiki::basename($page);
+ }
+ elsif ($config{field_allow_config}
+ and $field_name =~ /^config-(.*)$/i)
+ {
+ my $cfield = $1;
+ if (exists $config{$cfield})
+ {
+ $value = $config{$cfield};
+ }
+ }
+ elsif ($field_name =~ /^(.*)-tagpage$/)
+ {
+ my $real_fn = $1;
+ if (exists $config{field_tags}{$real_fn}
+ and defined $config{field_tags}{$real_fn})
+ {
+ my @values = field_get_value($real_fn, $page);
+ if (@values)
+ {
+ foreach my $tag (@values)
+ {
+ if ($tag)
+ {
+ my $link = $config{field_tags}{$real_fn} . '/' . $tag;
+ push @array_value, $link;
+ }
+ }
+ $value = join(",", @array_value);
+ }
+ }
+ }
+ }
+ if (defined $value)
+ {
+ if (!@array_value)
+ {
+ @array_value = ($value);
+ }
+ # cache the value
+ $Cache{$page}{$field_name}{scalar} = $value;
+ $Cache{$page}{$field_name}{array} = \@array_value;
+ }
+ return (wantarray ? @array_value : $value);
+} # field_get_value
+
+# set the values for the given HTML::Template template
+sub field_set_template_values ($$;@) {
+ my $template = shift;
+ my $page = shift;
+ my %params = @_;
+
+ my $get_value_fn = (exists $params{value_fn}
+ ? $params{value_fn}
+ : \&field_get_value);
+
+ # Find the parameter names in this template
+ # and see if you can find their values.
+
+ # The reason we check the template for field names is because we
+ # don't know what fields the registered plugins provide; and this is
+ # reasonable because for some plugins (e.g. a YAML data plugin) they
+ # have no way of knowing, ahead of time, what fields they might be
+ # able to provide.
+
+ my @parameter_names = $template->param();
+ foreach my $field (@parameter_names)
+ {
+ my $type = $template->query(name => $field);
+ if ($type eq 'LOOP' and $field =~ /_LOOP$/i)
+ {
+ # Loop fields want arrays.
+ # Figure out what field names to look for:
+ # * names are from the enclosed loop fields
+ my @loop_fields = $template->query(loop => $field);
+
+ my @loop_vals = ();
+ my %loop_field_arrays = ();
+ foreach my $fn (@loop_fields)
+ {
+ if ($fn !~ /^__/) # not a special loop variable
+ {
+ my @ival_array = $get_value_fn->($fn, $page);
+ if (@ival_array)
+ {
+ $loop_field_arrays{$fn} = \@ival_array;
+ }
+ }
+ }
+ foreach my $fn (sort keys %loop_field_arrays)
+ {
+ my $i = 0;
+ foreach my $v (@{$loop_field_arrays{$fn}})
+ {
+ if (!defined $loop_vals[$i])
+ {
+ $loop_vals[$i] = {};
+ }
+ $loop_vals[$i]{$fn} = $v;
+ $i++;
+ }
+ }
+ $template->param($field => \@loop_vals);
+ }
+ else # not a loop field
+ {
+ my $value = $get_value_fn->($field, $page);
+ if (defined $value)
+ {
+ $template->param($field => $value);
+ }
+ }
+ }
+} # field_set_template_values
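+
+# For illustration only: a template fragment that field_set_template_values
+# above would fill in.  AUTHOR is an example field name; AUTHORS_LOOP
+# matches the /_LOOP$/ test, and each value of the enclosed AUTHOR field
+# becomes one loop row.
+#
+#   <TMPL_VAR TITLE>
+#   <TMPL_LOOP AUTHORS_LOOP><TMPL_VAR AUTHOR></TMPL_LOOP>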
+
+# ===============================================
+# Private Functions
+# ---------------------------
+
+# Calculate the lookup order
+# <module, >module, AZ
+# This is cribbed from the PmWiki Markup function
+sub add_lookup_order {
+ my $id = shift;
+ my $when = shift;
+
+ # may have given an explicit ordering
+ if ($when =~ /^[A-Z][A-Z]$/)
+ {
+ $Fields{$id}{seq} = $when;
+ }
+ else
+ {
+ my $cmp = '=';
+ my $seq_field = $when;
+ if ($when =~ /^([<>])(.+)$/)
+ {
+ $cmp = $1;
+ $seq_field = $2;
+ }
+ $Fields{$seq_field}{dep}{$id} = $cmp;
+ if (exists $Fields{$seq_field}{seq}
+ and defined $Fields{$seq_field}{seq})
+ {
+ $Fields{$id}{seq} = $Fields{$seq_field}{seq} . $cmp;
+ }
+ }
+ if ($Fields{$id}{seq})
+ {
+ foreach my $i (keys %{$Fields{$id}{dep}})
+ {
+ my $m = $Fields{$id}{dep}{$i};
+ add_lookup_order($i, "$m$id");
+ }
+ delete $Fields{$id}{dep};
+ }
+}
+
+sub build_fields_lookup_order {
+
+ # remove the _first, _last and _middle dummy fields
+ # because we don't need them anymore
+ delete $Fields{_first};
+ delete $Fields{_last};
+ delete $Fields{_middle};
+ my %lookup_spec = ();
+ # Make a hash of the lookup sequences
+ foreach my $id (sort keys %Fields)
+ {
+ my $seq = ($Fields{$id}{seq}
+ ? $Fields{$id}{seq}
+ : 'MM');
+ if (!exists $lookup_spec{$seq})
+ {
+ $lookup_spec{$seq} = {};
+ }
+ $lookup_spec{$seq}{$id} = 1;
+ }
+
+ # get the field-lookup order by (a) sorting by lookup_spec
+ # and (b) sorting by field-name for the fields that registered
+ # the same field-lookup order
+ foreach my $ord (sort keys %lookup_spec)
+ {
+ push @FieldsLookupOrder, sort keys %{$lookup_spec{$ord}};
+ }
+} # build_fields_lookup_order
+
+# match field funcs
+# page-to-check, wanted
+sub match_a_field ($$) {
+ my $page=shift;
+ my $wanted=shift;
+
+ # The field name is first; the rest is the match
+ my $field_name;
+ my $glob;
+ if ($wanted =~ /^(\w+)\s+(.*)$/)
+ {
+ $field_name = $1;
+ $glob = $2;
+ }
+ else
+ {
+ return IkiWiki::FailReason->new("cannot match field");
+ }
+
+ # turn glob into a safe regexp
+ my $re=IkiWiki::glob2re($glob);
+
+ my $val = IkiWiki::Plugin::field::field_get_value($field_name, $page);
+
+ if (defined $val) {
+ if ($val=~/^$re$/i) {
+ return IkiWiki::SuccessReason->new("$re matches $field_name of $page", $page => $IkiWiki::DEPEND_CONTENT, "" => 1);
+ }
+ else {
+ return IkiWiki::FailReason->new("$re does not match $field_name of $page", "" => 1);
+ }
+ }
+ else {
+ return IkiWiki::FailReason->new("$page does not have a $field_name", "" => 1);
+ }
+} # match_a_field
+
+# check against individual items of a field
+# (treat the field as an array)
+# page-to-check, wanted
+sub match_a_field_item ($$) {
+ my $page=shift;
+ my $wanted=shift;
+
+ # The field name is first; the rest is the match
+ my $field_name;
+ my $glob;
+ if ($wanted =~ /^(\w+)\s+(.*)$/)
+ {
+ $field_name = $1;
+ $glob = $2;
+ }
+ else
+ {
+ return IkiWiki::FailReason->new("cannot match field");
+ }
+
+ # turn glob into a safe regexp
+ my $re=IkiWiki::glob2re($glob);
+
+ my @val_array = IkiWiki::Plugin::field::field_get_value($field_name, $page);
+
+ if (@val_array)
+ {
+ foreach my $val (@val_array)
+ {
+ if (defined $val) {
+ if ($val=~/^$re$/i) {
+ return IkiWiki::SuccessReason->new("$re matches $field_name of $page", $page => $IkiWiki::DEPEND_CONTENT, "" => 1);
+ }
+ }
+ }
+ # not found
+ return IkiWiki::FailReason->new("$re does not match $field_name of $page", "" => 1);
+ }
+ else {
+ return IkiWiki::FailReason->new("$page does not have a $field_name", "" => 1);
+ }
+} # match_a_field_item
+
+# ===============================================
+# PageSpec functions
+# ---------------------------
+
+package IkiWiki::PageSpec;
+
+sub match_field ($$;@) {
+ my $page=shift;
+ my $wanted=shift;
+ return IkiWiki::Plugin::field::match_a_field($page, $wanted);
+} # match_field
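+
+# For illustration only: in a PageSpec this reads, e.g.,
+# field(BookAuthor Georgette*) -- the first word is the field name, the
+# rest is a glob matched against the field's value (names are examples).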
+
+sub match_destfield ($$;@) {
+ my $page=shift;
+ my $wanted=shift;
+ my %params=@_;
+
+ return IkiWiki::FailReason->new("cannot match destpage") unless exists $params{destpage};
+
+ # Match the field on the destination page, not the source page
+ return IkiWiki::Plugin::field::match_a_field($params{destpage}, $wanted);
+} # match_destfield
+
+sub match_field_item ($$;@) {
+ my $page=shift;
+ my $wanted=shift;
+ return IkiWiki::Plugin::field::match_a_field_item($page, $wanted);
+} # match_field_item
+
+sub match_destfield_item ($$;@) {
+ my $page=shift;
+ my $wanted=shift;
+ my %params=@_;
+
+ return IkiWiki::FailReason->new("cannot match destpage") unless exists $params{destpage};
+
+ # Match the field on the destination page, not the source page
+ return IkiWiki::Plugin::field::match_a_field_item($params{destpage}, $wanted);
+} # match_destfield_item
+
+sub match_field_tagged ($$;@) {
+ my $page=shift;
+ my $wanted=shift;
+ my %params=@_;
+
+ # The field name is first; the rest is the match
+ my $field_name;
+ my $glob;
+ if ($wanted =~ /^(\w+)\s+(.*)$/)
+ {
+ $field_name = $1;
+ $glob = $2;
+ }
+ else
+ {
+ return IkiWiki::FailReason->new("cannot match field");
+ }
+ return match_link($page, $glob, linktype => lc($field_name), @_);
+}
+
+sub match_destfield_tagged ($$;@) {
+ my $page=shift;
+ my $wanted=shift;
+ my %params=@_;
+
+ return IkiWiki::FailReason->new("cannot match destpage") unless exists $params{destpage};
+
+ # Match the field on the destination page, not the source page
+    return match_field_tagged($params{destpage}, $wanted);
+}
+
+# ===============================================
+# SortSpec functions
+# ---------------------------
+package IkiWiki::SortSpec;
+
+sub cmp_field {
+ my $field = shift;
+ error(gettext("sort=field requires a parameter")) unless defined $field;
+
+ my $left = IkiWiki::Plugin::field::field_get_value($field, $a);
+ my $right = IkiWiki::Plugin::field::field_get_value($field, $b);
+
+ $left = "" unless defined $left;
+ $right = "" unless defined $right;
+ return $left cmp $right;
+}
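+
+# For illustration only: used as, e.g., sort="field(title)" or
+# sort="field_natural(title)" in an inline directive (the field name is an
+# example).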
+
+sub cmp_field_natural {
+ my $field = shift;
+ error(gettext("sort=field requires a parameter")) unless defined $field;
+
+ eval q{use Sort::Naturally};
+ error $@ if $@;
+
+ my $left = IkiWiki::Plugin::field::field_get_value($field, $a);
+ my $right = IkiWiki::Plugin::field::field_get_value($field, $b);
+
+ $left = "" unless defined $left;
+ $right = "" unless defined $right;
+ return Sort::Naturally::ncmp($left, $right);
+}
+
+1;
diff --git a/.library/IkiWiki/Plugin/getfield.pm b/.library/IkiWiki/Plugin/getfield.pm
new file mode 100644
index 00000000..d6564eaf
--- /dev/null
+++ b/.library/IkiWiki/Plugin/getfield.pm
@@ -0,0 +1,126 @@
+#!/usr/bin/perl
+# Ikiwiki getfield plugin.
+# Substitute field values in the content of the page.
+# See plugin/contrib/getfield for documentation.
+package IkiWiki::Plugin::getfield;
+use strict;
+=head1 NAME
+
+IkiWiki::Plugin::getfield - query the values of fields
+
+=head1 VERSION
+
+This describes version B<0.02> of IkiWiki::Plugin::getfield
+
+=cut
+
+our $VERSION = '0.02';
+
+=head1 PREREQUISITES
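+=head1 SYNOPSIS
+
+A sketch of the substitutions this plugin performs in page content; the
+field and page names are examples only.
+
+    The title of this page is {{$title}}.
+    The title of the sitemap page is {{$sitemap#title}}.
+    On the destination page this is {{+$title+}}.
+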
+
+ IkiWiki
+ IkiWiki::Plugin::field
+
+=head1 AUTHOR
+
+ Kathryn Andersen (RUBYKAT)
+ http://github.com/rubykat
+
+=head1 COPYRIGHT
+
+Copyright (c) 2009 Kathryn Andersen
+
+This program is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself.
+
+=cut
+
+use IkiWiki 3.00;
+
+sub import {
+ hook(type => "getsetup", id => "getfield", call => \&getsetup);
+ hook(type => "filter", id => "getfield", call => \&do_filter, last=>1);
+
+ IkiWiki::loadplugin("field");
+}
+
+#---------------------------------------------------------------
+# Hooks
+# --------------------------------
+
+sub getsetup () {
+ return
+ plugin => {
+ safe => 1,
+ rebuild => undef,
+ },
+}
+
+sub do_filter (@) {
+ my %params=@_;
+ my $page = $params{page};
+ my $destpage = ($params{destpage} ? $params{destpage} : $params{page});
+
+ my $page_file=$pagesources{$page};
+ my $page_type=pagetype($page_file);
+ if (defined $page_type)
+ {
+ while ($params{content} =~ /{{\$([-\w\/]+#)?[-\w]+}}/)
+ {
+ # substitute {{$var}} variables (source-page)
+ $params{content} =~ s/{{\$([-\w]+)}}/get_field_value($1,$page)/eg;
+
+ # substitute {{$page#var}} variables (source-page)
+ $params{content} =~ s/{{\$([-\w\/]+)#([-\w]+)}}/get_other_page_field_value($2,$page,$1)/eg;
+ }
+ }
+
+ $page_file=$pagesources{$destpage};
+ $page_type=pagetype($page_file);
+ if (defined $page_type)
+ {
+ while ($params{content} =~ /{{\+\$([-\w\/]+#)?[-\w]+\+}}/)
+ {
+ # substitute {{+$var+}} variables (dest-page)
+ $params{content} =~ s/{{\+\$([-\w]+)\+}}/get_field_value($1,$destpage)/eg;
+ # substitute {{+$page#var+}} variables (source-page)
+ $params{content} =~ s/{{\+\$([-\w\/]+)#([-\w]+)\+}}/get_other_page_field_value($2,$destpage,$1)/eg;
+ }
+ }
+
+ return $params{content};
+} # do_filter
+
+#---------------------------------------------------------------
+# Private functions
+# --------------------------------
+sub get_other_page_field_value ($$$) {
+ my $field = shift;
+ my $page = shift;
+ my $other_page = shift;
+
+ my $use_page = bestlink($page, $other_page);
+ # add a dependency for the page from which we get the value
+ add_depends($page, $other_page);
+
+ my $val = get_field_value($field, $use_page);
+ if ($val eq $field)
+ {
+ return "${other_page}#$field";
+ }
+ return $val;
+
+} # get_other_page_field_value
+
+sub get_field_value ($$) {
+ my $field = shift;
+ my $page = shift;
+
+ my $value = IkiWiki::Plugin::field::field_get_value($field,$page);
+ return $value if defined $value;
+
+ # if there is no value, return the field name.
+ return $field;
+} # get_field_value
+
+1;
diff --git a/.library/IkiWiki/Plugin/ymlfront.pm b/.library/IkiWiki/Plugin/ymlfront.pm
new file mode 100644
index 00000000..3811591b
--- /dev/null
+++ b/.library/IkiWiki/Plugin/ymlfront.pm
@@ -0,0 +1,426 @@
+#!/usr/bin/perl
+# YAML format for structured data
+# See plugins/contrib/ymlfront for documentation.
+package IkiWiki::Plugin::ymlfront;
+use warnings;
+use strict;
+=head1 NAME
+
+IkiWiki::Plugin::ymlfront - add YAML-format data to a page
+
+=head1 VERSION
+
+This describes version B<0.03> of IkiWiki::Plugin::ymlfront
+
+=cut
+
+our $VERSION = '0.03';
+
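+=head1 SYNOPSIS
+
+A sketch of the two forms of YAML data handled below; the field names are
+examples only.  Either between delimiters at the top of the page file
+('---' is the default):
+
+    ---
+    title: Foo
+    description: A page about Foo.
+    ---
+
+or anywhere in the page, as a directive:
+
+    [[!ymlfront data="""
+    title: Foo
+    """]]
+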
+=head1 PREREQUISITES
+
+ IkiWiki
+ IkiWiki::Plugin::field
+ YAML::Any
+
+=head1 AUTHOR
+
+ Kathryn Andersen (RUBYKAT)
+ http://github.com/rubykat
+
+=head1 COPYRIGHT
+
+Copyright (c) 2009 Kathryn Andersen
+
+This program is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself.
+
+=cut
+use IkiWiki 3.00;
+
+sub import {
+ hook(type => "getsetup", id => "ymlfront", call => \&getsetup);
+ hook(type => "checkconfig", id => "ymlfront", call => \&checkconfig);
+ hook(type => "filter", id => "ymlfront", call => \&filter, first=>1);
+ hook(type => "preprocess", id => "ymlfront", call => \&preprocess, scan=>1);
+ hook(type => "scan", id => "ymlfront", call => \&scan);
+ hook(type => "checkcontent", id => "ymlfront", call => \&checkcontent);
+
+ IkiWiki::loadplugin('field');
+ IkiWiki::Plugin::field::field_register(id=>'ymlfront',
+ call=>\&yml_get_value,
+ first=>1);
+}
+
+# ------------------------------------------------------------
+# Hooks
+# --------------------------------
+sub getsetup () {
+ return
+ plugin => {
+ safe => 1,
+ rebuild => 1,
+ },
+ ymlfront_delim => {
+ type => "array",
+		example => "ymlfront_delim => [qw(--YAML-START-- --YAML-END--)]",
+ description => "delimiters of YAML data",
+ safe => 0,
+ rebuild => undef,
+ },
+}
+
+sub checkconfig () {
+ eval q{use YAML::Any};
+ eval q{use YAML} if $@;
+ if ($@)
+ {
+ return error ("ymlfront: failed to use YAML::Any or YAML");
+ }
+
+ $YAML::UseBlock = 1;
+ $YAML::Syck::ImplicitUnicode = 1;
+
+ if (!defined $config{ymlfront_delim})
+ {
+ $config{ymlfront_delim} = [qw(--- ---)];
+ }
+} # checkconfig
+
+# scan gets called before filter
+sub scan (@) {
+ my %params=@_;
+ my $page = $params{page};
+
+ my $page_file=$pagesources{$page} || return;
+ my $page_type=pagetype($page_file);
+ if (!defined $page_type)
+ {
+ return;
+ }
+ # clear the old data
+ if (exists $pagestate{$page}{ymlfront})
+ {
+ delete $pagestate{$page}{ymlfront};
+ }
+ my $parsed_yml = parse_yml(%params);
+ if (defined $parsed_yml
+ and defined $parsed_yml->{yml})
+ {
+ # save the data to pagestate
+ foreach my $fn (keys %{$parsed_yml->{yml}})
+ {
+ my $fval = $parsed_yml->{yml}->{$fn};
+ $pagestate{$page}{ymlfront}{$fn} = $fval;
+ }
+ }
+ # update meta hash
+ if (exists $pagestate{$page}{ymlfront}{title}
+ and $pagestate{$page}{ymlfront}{title})
+ {
+ $pagestate{$page}{meta}{title} = $pagestate{$page}{ymlfront}{title};
+ }
+ if (exists $pagestate{$page}{ymlfront}{description}
+ and $pagestate{$page}{ymlfront}{description})
+ {
+ $pagestate{$page}{meta}{description} = $pagestate{$page}{ymlfront}{description};
+ }
+ if (exists $pagestate{$page}{ymlfront}{author}
+ and $pagestate{$page}{ymlfront}{author})
+ {
+ $pagestate{$page}{meta}{author} = $pagestate{$page}{ymlfront}{author};
+ }
+} # scan
+
+# use this for data in a [[!ymlfront ...]] directive
+sub preprocess (@) {
+ my %params=@_;
+ my $page = $params{page};
+
+ if (! exists $params{data}
+ or ! defined $params{data}
+ or !$params{data})
+ {
+ error gettext("missing data parameter")
+ }
+ # All the work of this is done in scan mode;
+ # when in preprocessing mode, just return an empty string.
+ my $scan=! defined wantarray;
+
+ if (!$scan)
+ {
+ return '';
+ }
+
+ # clear the old data
+ if (exists $pagestate{$page}{ymlfront})
+ {
+ delete $pagestate{$page}{ymlfront};
+ }
+ my $parsed_yml = parse_yml(%params);
+ if (defined $parsed_yml
+ and defined $parsed_yml->{yml})
+ {
+ # save the data to pagestate
+ foreach my $fn (keys %{$parsed_yml->{yml}})
+ {
+ my $fval = $parsed_yml->{yml}->{$fn};
+ $pagestate{$page}{ymlfront}{$fn} = $fval;
+ }
+ }
+ # update meta hash
+ if (exists $pagestate{$page}{ymlfront}{title}
+ and $pagestate{$page}{ymlfront}{title})
+ {
+ $pagestate{$page}{meta}{title} = $pagestate{$page}{ymlfront}{title};
+ }
+ if (exists $pagestate{$page}{ymlfront}{description}
+ and $pagestate{$page}{ymlfront}{description})
+ {
+ $pagestate{$page}{meta}{description} = $pagestate{$page}{ymlfront}{description};
+ }
+ if (exists $pagestate{$page}{ymlfront}{author}
+ and $pagestate{$page}{ymlfront}{author})
+ {
+ $pagestate{$page}{meta}{author} = $pagestate{$page}{ymlfront}{author};
+ }
+ return '';
+} # preprocess
+
+sub filter (@) {
+ my %params=@_;
+ my $page = $params{page};
+
+ my $page_file=$pagesources{$page} || return $params{content};
+ my $page_type=pagetype($page_file);
+ if (!defined $page_type)
+ {
+ return $params{content};
+ }
+ my $parsed_yml = parse_yml(%params);
+ if (defined $parsed_yml
+ and defined $parsed_yml->{yml}
+ and defined $parsed_yml->{content})
+ {
+ $params{content} = $parsed_yml->{content};
+ # also check for a content value
+ if (exists $pagestate{$page}{ymlfront}{content}
+ and defined $pagestate{$page}{ymlfront}{content}
+ and $pagestate{$page}{ymlfront}{content})
+ {
+ $params{content} .= $pagestate{$page}{ymlfront}{content};
+ }
+ }
+
+ return $params{content};
+} # filter
+
+# check the correctness of the YAML code before saving a page
+sub checkcontent {
+ my %params=@_;
+ my $page = $params{page};
+
+ my $page_file=$pagesources{$page};
+ if ($page_file)
+ {
+ my $page_type=pagetype($page_file);
+ if (!defined $page_type)
+ {
+ return undef;
+ }
+ }
+ my $parsed_yml = parse_yml(%params);
+ if (!defined $parsed_yml)
+ {
+ debug("ymlfront: Save of $page failed: $@");
+ return gettext("YAML data incorrect: $@");
+ }
+ return undef;
+} # checkcontent
+
+# ------------------------------------------------------------
+# Field functions
+# --------------------------------
+sub yml_get_value ($$) {
+ my $field_name = shift;
+ my $page = shift;
+
+ my $value = undef;
+ if (exists $pagestate{$page}{ymlfront}{$field_name})
+ {
+ $value = $pagestate{$page}{ymlfront}{$field_name};
+ }
+ elsif (exists $pagestate{$page}{ymlfront}{lc($field_name)})
+ {
+ $value = $pagestate{$page}{ymlfront}{lc($field_name)};
+ }
+ if (defined $value)
+ {
+ if (ref $value)
+ {
+ my @value_array = @{$value};
+ return (wantarray
+ ? @value_array
+ : join(",", @value_array));
+ }
+ else
+ {
+ return (wantarray ? ($value) : $value);
+ }
+ }
+ return undef;
+} # yml_get_value
+
+# ------------------------------------------------------------
+# Helper functions
+# --------------------------------
+
+# parse the YAML data from the given content
+# Expects page, content
+# Returns { yml=>%yml_data, content=>$content } or undef
+sub parse_yml {
+ my %params=@_;
+ my $page = $params{page};
+ my $content = $params{content};
+
+ my $page_file=$pagesources{$page};
+ if ($page_file)
+ {
+ my $page_type=pagetype($page_file);
+ if (!defined $page_type)
+ {
+ return undef;
+ }
+ }
+ my $start_of_content = '';
+ my $yml_str = '';
+ my $rest_of_content = '';
+ if ($params{data})
+ {
+ $yml_str = $params{data};
+ }
+ elsif ($content)
+ {
+ my $regex = qr{
+ (\\?) # 1: escape?
+ \[\[(!) # directive open; 2: prefix
+ (ymlfront) # 3: command
+ ( # 4: the parameters..
+ \s+ # Must have space if parameters present
+ (?:
+ (?:[-\w]+=)? # named parameter key?
+ (?:
+ """.*?""" # triple-quoted value
+ |
+ "[^"]*?" # single-quoted value
+ |
+ [^"\s\]]+ # unquoted value
+ )
+ \s* # whitespace or end
+ # of directive
+ )
+ *)? # 0 or more parameters
+ \]\] # directive closed
+ }sx;
+ my $ystart = $config{ymlfront_delim}[0];
+ my $yend = $config{ymlfront_delim}[1];
+ if ($ystart eq '---'
+ and $yend eq '---'
+ and $content =~ /^---[\n\r](.*?[\n\r])---[\n\r](.*)$/s)
+ {
+ $yml_str = $1;
+ $rest_of_content = $2;
+ }
+ elsif ($content =~ /^(.*?)${ystart}[\n\r](.*?[\n\r])${yend}([\n\r].*)$/s)
+ {
+ $yml_str = $2;
+ $rest_of_content = $1 . $3;
+ }
+ elsif ($content =~ /$regex/)
+ {
+ my $escape=$1;
+ my $prefix=$2;
+ my $command=$3;
+ my $params=$4;
+ if ($escape)
+ {
+ $rest_of_content = $content;
+ }
+ else
+ {
+ my %phash = ();
+ while ($params =~ m{
+ (?:([-\w]+)=)? # 1: named parameter key?
+ (?:
+ """(.*?)""" # 2: triple-quoted value
+ |
+ "([^"]*?)" # 3: single-quoted value
+ |
+ (\S+) # 4: unquoted value
+ )
+ (?:\s+|$) # delimiter to next param
+ }sgx) {
+ my $key=$1;
+ my $val;
+ if (defined $2) {
+ $val=$2;
+ $val=~s/\r\n/\n/mg;
+ $val=~s/^\n+//g;
+ $val=~s/\n+$//g;
+ }
+ elsif (defined $3) {
+ $val=$3;
+ }
+ elsif (defined $4) {
+ $val=$4;
+ }
+
+ if (defined $key) {
+ $phash{$key} = $val;
+ }
+ else {
+ $phash{''} = $val;
+ }
+ }
+ if (defined $phash{data})
+ {
+ $yml_str = $phash{data};
+ $content =~ /^(.*?)\[\[!ymlfront.*?\]\](.*?)$/s;
+ $start_of_content = $1;
+ $rest_of_content = $2;
+ }
+ }
+ }
+ }
+ if ($yml_str)
+ {
+ # if {{$page}} is there, do an immediate substitution
+ $yml_str =~ s/\{\{\$page\}\}/$page/sg;
+
+ my $ydata;
+ eval q{$ydata = Load($yml_str);};
+ if ($@)
+ {
+ debug("ymlfront: Load of $page failed: $@");
+ return undef;
+ }
+ if (!$ydata)
+ {
+ debug("ymlfront: no YAML for $page");
+ return undef;
+ }
+ my %lc_data = ();
+ if ($ydata)
+ {
+ # make lower-cased versions of the data
+ foreach my $fn (keys %{$ydata})
+ {
+ my $fval = $ydata->{$fn};
+ $lc_data{lc($fn)} = $fval;
+ }
+ }
+ return { yml=>\%lc_data,
+ content=>$start_of_content . $rest_of_content};
+ }
+ return { yml=>undef, content=>$content };
+} # parse_yml
+1;
diff --git a/.templates/page.tmpl b/.templates/page.tmpl
index 4ec3b4bf..5192138a 100644
--- a/.templates/page.tmpl
+++ b/.templates/page.tmpl
@@ -11,7 +11,6 @@
</TMPL_IF>
</TMPL_IF>
<TMPL_IF HTML5><meta charset="utf-8" /><TMPL_ELSE><meta http-equiv="Content-Type" content="text/html; charset=utf-8" /></TMPL_IF>
-<meta http-equiv="Cache-Control" content="must-revalidate" />
<title><TMPL_VAR TITLE></title>
<TMPL_IF FAVICON>
<link rel="icon" href="<TMPL_VAR BASEURL><TMPL_VAR FAVICON>" type="image/x-icon" />
@@ -124,6 +123,7 @@
<TMPL_VAR CONTENT>
<TMPL_IF HTML5></section><TMPL_ELSE></div></TMPL_IF>
+<TMPL_UNLESS DYNAMIC>
<TMPL_IF COMMENTS>
<TMPL_IF HTML5><section id="comments"><TMPL_ELSE><div id="comments"></TMPL_IF>
<TMPL_VAR COMMENTS>
@@ -136,6 +136,7 @@
</TMPL_IF>
<TMPL_IF HTML5></section><TMPL_ELSE></div></TMPL_IF>
</TMPL_IF>
+</TMPL_UNLESS>
</div>
diff --git a/community/meetings.mdwn b/community/meetings.mdwn
index ba4fc2bd..ecd0e465 100644
--- a/community/meetings.mdwn
+++ b/community/meetings.mdwn
@@ -17,7 +17,8 @@ is included in the section entitled
# Past
- * [GNU Hackers Meeting in the Hague 2010](http://www.gnu.org/ghm/2010/denhaag/)
+ * [[DebConf10]]
+ * [[GNU Hackers Meeting in the Hague 2010|ghm2010]]
* [[FOSDEM 2010]]
* [[EuroSys_2009]]
* [[FOSDEM_2008]]
diff --git a/community/meetings/debconf10.mdwn b/community/meetings/debconf10.mdwn
new file mode 100644
index 00000000..bafd7de0
--- /dev/null
+++ b/community/meetings/debconf10.mdwn
@@ -0,0 +1,20 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!meta title="DebConf10"]]
+
+<http://debconf10.debconf.org/>
+
+ * {{$banck_hurd}}
+
+
+[[!ymlfront data="""
+banck_hurd: "presentation (including video) by Michael Banck: [*Debian GNU/Hurd -- Past. Present. And Future?*](http://penta.debconf.org/dc10_schedule/events/595.en.html) ([slides](http://people.debian.org/~mbanck/debian-hurd.pdf))"
+"""]]
diff --git a/community/meetings/ghm2010.mdwn b/community/meetings/ghm2010.mdwn
new file mode 100644
index 00000000..b5cb7311
--- /dev/null
+++ b/community/meetings/ghm2010.mdwn
@@ -0,0 +1,20 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!meta title="GNU Hackers Meeting in the Hague 2010"]]
+
+<http://www.gnu.org/ghm/2010/denhaag/>
+
+ * {{$walfield_hurd}}
+
+
+[[!ymlfront data="""
+walfield_hurd: "video of presentation by Neal Walfield: [*GNU/Hurd: It's About Freedom (Or: Why you should care)*](http://audio-video.gnu.org/video/ghm2010/GNU-Hurd_-_Its_About_Freedom,_Or_Why_you_should_care.ogv)"
+"""]]
diff --git a/community/weblogs.mdwn b/community/weblogs.mdwn
index 8df216ba..28f413eb 100644
--- a/community/weblogs.mdwn
+++ b/community/weblogs.mdwn
@@ -1,4 +1,5 @@
-[[!meta copyright="Copyright © 2008, 2009 Free Software Foundation, Inc."]]
+[[!meta copyright="Copyright © 2008, 2009, 2010 Free Software Foundation,
+Inc."]]
[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
id="license" text="Permission is granted to copy, distribute and/or modify this
@@ -10,6 +11,12 @@ is included in the section entitled
Weblogs from Hurd programmers and enthusiasts.
+[[!map
+pages="community/weblogs/* and !community/weblogs/*/*"
+show=title]]
+
+---
+
[[!inline
pages="community/weblogs/*/* and !community/weblogs/*/*/*"
show=0
diff --git a/community/weblogs/ArneBab/Hurd-showcase-qemu-image.mdwn b/community/weblogs/ArneBab/Hurd-showcase-qemu-image.mdwn
new file mode 100644
index 00000000..d55527a7
--- /dev/null
+++ b/community/weblogs/ArneBab/Hurd-showcase-qemu-image.mdwn
@@ -0,0 +1,106 @@
+I’m currently preparing a qemu image which allows testing the capabilities of the Hurd with as little effort as possible.
+
+**Work in progress. These are my in-development notes.**
+
+For that I want to use:
+
+* An up-to-date Debian image (no longer online, but I have a copy here).
+* My [Hurd Intro](http://bitbucket.org/ArneBab/hurd_intro),
+* Translators from [hurd-extras](http://www.nongnu.org/hurdextras/) and the [incubator](http://git.savannah.gnu.org/cgit/hurd/incubator.git/), and naturally
+* a lot of apt-get update; apt-get upgrade and apt-get dist-upgrade :) (all worked flawlessly).
+
+## Working
+
+### Generally
+
+ # ssh with public key
+ apt-get install random-egd
+ ssh-keygen
+
+ # build tools
+ apt-get install build-essential
+
+### StoreIO
+
+ # mount an iso image
+ mount foo.iso bar -t iso9660fs
+ # see myfile as device
+ settrans foo /hurd/storeio myfile
+ # so that means I can pack a complete chroot (300MB) into a file with storeio and ext2fs — giselher
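+ # a sketch of that idea (untested; file names are examples, block size
+ # 4096 per the ext2fs notes):
+ dd if=/dev/zero of=chroot.img bs=1M count=300
+ mke2fs -F -b 4096 chroot.img
+ settrans -c chrootdir /hurd/ext2fs `pwd`/chroot.img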
+
+ # nfs mount anywhere (TODO: check this with antrik)
+ mount server:/home /home -t nfs
+ settrans /home /hurd/nfs server:/home
+
+## In Progress
+
+### Hurdextras
+
+ hg clone <hurdextras repo>
+
+### httpfs
+
+ # pkg-config is needed to avoid “PKG_CHECK_MODULES syntax error near unexpected token `HTTPFS,'”
+ # pkg-config must be installed before you run autoreconf.
+ apt-get install autoconf autoconf-archive libxml2-dev pkg-config
+ autoreconf -i -f
+ ./configure
+ make
+ make install
+
+ settrans -ac gnu /usr/local/httpfs www.gnu.org/
+ # (breaks, because libxml2 needs pthreads → work to do.)
+ # (what we need: pthreads in translators. → see the [work of Barry](https://savannah.gnu.org/task/?func=detailitem&item_id=5487))
+ # check: for i in `objdump -x /usr/local/bin/httpfs |grep NEEDED| sed s/.*NEEDED//`; do echo $i; objdump -x /usr/lib/$i | grep pthread; objdump -x /lib/$i | grep pthread; done
+
+### Tarfs
+
+ apt-get install zip libz-dev libbz2-dev
+ hg clone http://bitbucket.org/ArneBab/hurd-tarfs tarfs
+ cd tarfs
+ make
+ make install
+ # works, though with warnings.
+
+ settrans -ca new /hurd/tarfs -cz test/intro.tar.gz
+ cp repos/intro/README new/
+ settrans -g new
+ tar -tf test/intro.tar.gz
+ # works
+
+ tar -cf test/intro.tar repos/intro
+ settrans -ac t /hurd/tarfs test/intro.tar
+ # (settrans: /hurd/tarfs: Translator died :( ⇒ more work to do )
+
+### nsmux
+
+ git clone git://git.sv.gnu.org/hurd/incubator.git nsmux
+ cd nsmux/
+ git checkout -b nsmux origin/nsmux
+
+ apt-get install autoconf autoconf-archive
+ autoreconf -i -f
+ ./configure
+ make
+ make install
+
+ cd ../..
+ mkdir test
+ settrans -c test2 /usr/local/bin/nsmux test
+ tar -cf test/intro.tar repos/hurd_intro
+ ls test2/intro.tar,,tarfs
+
+### clisp
+
+ git clone git://git.sv.gnu.org/hurd/incubator.git clisp
+ cd clisp/
+ git checkout -b clisp origin/clisp
+
+ apt-get install texi2html
+ make
+ make install
+
+
+### debugging Translators
+
+ rpctrace
diff --git a/community/weblogs/ArneBab/what_we_need.mdwn b/community/weblogs/ArneBab/what_we_need.mdwn
new file mode 100644
index 00000000..cb4e55b2
--- /dev/null
+++ b/community/weblogs/ArneBab/what_we_need.mdwn
@@ -0,0 +1,39 @@
+We created a list of the things we still need before we can use the Hurd in our day-to-day activities (work or hobby).
+
+As soon as these issues are taken care of, the Hurd offers everything we need for fulfilling most of our computing needs on at least one of our devices:
+
+- USB (5): Arne, ms, Michael, Emilio, antrik²³
+- Wireless (5): Arne, ms, Carl Fredrik, Michael (netbook), antrik (notebook)
+- Sound (4): ms, Carl Fredrik, Michael, antrik²
+
+- SATA (2): Michael, (Emilio)
+- Tested for modern machines°¹ (2): Emilio, antrik (notebook)
+- Stable Xorg° (2): Emilio, antrik
+- PPPoE (2): Carl Fredrik, antrik²
+
+- Graphical Desktop (1): Emilio
+- Full-featured high-resolution console which doesn’t need X (1): antrik
+- Switching between console and X° (1): antrik
+- full-featured browser (i.e. Firefox)°⁵ (1): antrik
+- NFS working for climm, w3m and git (1): antrik⁴
+- mplayer with win32codecs (1): antrik³
+- gnash or alternatives (1): antrik³
+
+°: Very likely needed by more people, but not named as most pressing issue.
+¹: It’s unclear on which processors the Hurd would have problems. Please report it if you have one!
+→ [info](http://www.mail-archive.com/bug-hurd@gnu.org/msg19105.html)
+²: Would be OK to use a router box instead.
+³: Not critical but would be convenient.
+⁴: Only while *not* using Hurd as the only machine.
+⁵: [We’re close to that](http://www.mail-archive.com/bug-hurd@gnu.org/msg19177.html).
+
+So, if one of these issues seems interesting to you, or you think “I can do that easily”,
+why not become a Hurd hacker and add your touch? :)
+
+You can reach us in the [[mailing_lists]] and in [[irc]].
+
+The source code is in our [[source_repositories]] (git). If you want to check the sources relevant to you, [DDE](http://git.savannah.gnu.org/cgit/hurd/incubator.git/tree/?h=dde) might be a good place to start for wireless and sound. USB, on the other hand, might need work in [gnumach](http://git.savannah.gnu.org/cgit/hurd/gnumach.git/) ([[hacking_info|microkernel/mach/gnumach]]).
+
+Besides: “The great next stuff” is in the [incubator git repo](http://git.savannah.gnu.org/cgit/hurd/incubator.git/), including (among others) [clisp](http://git.savannah.gnu.org/cgit/hurd/incubator.git/tree/?h=clisp) (translators in lisp) and [nsmux](http://git.savannah.gnu.org/cgit/hurd/incubator.git/tree/?h=nsmux) (dynamically setting translators on files for one command by accessing `file,,translator`).
+
+Happy hacking!
diff --git a/community/weblogs/antrik.mdwn b/community/weblogs/antrik.mdwn
new file mode 100644
index 00000000..6db88dd9
--- /dev/null
+++ b/community/weblogs/antrik.mdwn
@@ -0,0 +1,15 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!inline
+pages="community/weblogs/antrik/* and !community/weblogs/antrik/*/*"
+show=0
+actions=no
+rootpage="community/weblogs/antrik" postformtext="Add a new entry named:"]]
diff --git a/community/weblogs/hook.mdwn b/community/weblogs/hook.mdwn
new file mode 100644
index 00000000..e9e083dc
--- /dev/null
+++ b/community/weblogs/hook.mdwn
@@ -0,0 +1,25 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+Well, as [[weblogs/ArneBab]] asked me to, I made a blog here in the Hurd's community section.
+
+So I suppose it's time for me to introduce myself. I'm a law student just short of my master's (called a "diploma" here in Slovenia) and a hacker at heart. I've been using GNU/Linux for over a decade now, started on Slackware and continued on Gentoo. I try to give back to the community by being an active member (posting bugs and whatnot), coordinating the local FSFE Fellowship group and lately also lending a hand to the Gentoo Licenses team. I keep a [website and blog](http://matija.suklje.name) of my own and occasionally even write some short sad piece of sloppy code.
+
+Small disclaimer about my coding abilities:
+
+ 10 IANAC IAAL
+
+For those who wonder what IANAC IAAL means — it's the opposite of IANAL IAAC and means "I Am Not A Coder, I Am A Lawyer" ;)
+
+[[!inline
+pages="community/weblogs/hook/* and !community/weblogs/hook/*/*"
+show=0
+actions=no
+rootpage="community/weblogs/hook" postformtext="Add a new entry named:"]]
diff --git a/community/weblogs/tschwinge.mdwn b/community/weblogs/tschwinge.mdwn
new file mode 100644
index 00000000..fc0d2ace
--- /dev/null
+++ b/community/weblogs/tschwinge.mdwn
@@ -0,0 +1,15 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!inline
+pages="community/weblogs/tschwinge/* and !community/weblogs/tschwinge/*/*"
+show=0
+actions=no
+rootpage="community/weblogs/tschwinge" postformtext="Add a new entry named:"]]
diff --git a/documentation.mdwn b/documentation.mdwn
index d96cb24b..62d96e9c 100644
--- a/documentation.mdwn
+++ b/documentation.mdwn
@@ -8,6 +8,8 @@ Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
is included in the section entitled
[[GNU Free Documentation License|/fdl]]."]]"""]]
+[[FAQ]]
+
Documentation for...
* [[GNU_Hurd|hurd/documentation]]
@@ -17,6 +19,16 @@ Documentation for...
* [[MIG|microkernel/mach/mig/documentation]]
+# Presentations
+
+## 2004
+
+ * 2004-07-02
+
+ Ognyan Kulev, *presentation of the Hurd*, at the seminar *LIO and friends*,
+ <http://debian.fmi.uni-sofia.bg/~ogi/hurd/liofest-20040702-hurd.ppt>, in
+ Bulgarian.
+
# General
* [[Media_Appearances]]
diff --git a/faq.mdwn b/faq.mdwn
new file mode 100644
index 00000000..9167ede6
--- /dev/null
+++ b/faq.mdwn
@@ -0,0 +1,28 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!meta title="FAQ"]]
+
+Also see the...
+
+ * [[microkernel FAQ|microkernel/faq]],
+
+ * [[GNU Hurd FAQ|hurd/faq]],
+
+ * [[running GNU Hurd FAQ|hurd/running/faq]],
+
+ * [[Debian GNU/Hurd FAQ|hurd/running/debian/faq]].
+
+[[!inline
+pages="faq/* and !*/discussion"
+show=0
+feeds=no
+actions=yes
+rootpage="faq" postformtext="Add a new item titled:"]]
diff --git a/faq/sharing_the_user_space.mdwn b/faq/sharing_the_user_space.mdwn
new file mode 100644
index 00000000..7d09ccc0
--- /dev/null
+++ b/faq/sharing_the_user_space.mdwn
@@ -0,0 +1,23 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+*Question:* Could it be possible to have a system installation where you can
+dual-boot using either the [[Linux]] kernel, or the GNU Hurd, so that
+everything but the kernel is shared?
+
+*Answer:* Given that both Linux and GNU Hurd are using the [[ELF]] binary
+format, this could indeed be made possible, if all programs agreed to rely on
+only one abstraction layer, for example the standard C library ([[glibc]]).
+(Additionally, system calls that are not covered by glibc calls would need
+to be reliably trapped and emulated.)  However, Linux's and the GNU Hurd's
+[[ABI]]s have diverged enough that this is not easy to do.  That's why you
+can't currently install a system in this way: you need a separate
+installation of the userspace suited to either the Linux kernel or the GNU
+Hurd.
diff --git a/hurd/faq.mdwn b/hurd/faq.mdwn
index be30e1b4..413aaf3f 100644
--- a/hurd/faq.mdwn
+++ b/hurd/faq.mdwn
@@ -1,4 +1,4 @@
-[[!meta copyright="Copyright © 2008 Free Software Foundation, Inc."]]
+[[!meta copyright="Copyright © 2008, 2010 Free Software Foundation, Inc."]]
[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
id="license" text="Permission is granted to copy, distribute and/or modify this
@@ -10,6 +10,8 @@ is included in the section entitled
[[!meta title="GNU Hurd FAQ"]]
+See also other [[/FAQ]].
+
[[!inline
pages="hurd/faq/* and !*/discussion"
show=0
diff --git a/hurd/faq/how_about_drivers.mdwn b/hurd/faq/how_about_drivers.mdwn
new file mode 100644
index 00000000..0556fd28
--- /dev/null
+++ b/hurd/faq/how_about_drivers.mdwn
@@ -0,0 +1,17 @@
+[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!meta title="What drivers does GNU/Hurd have?"]]
+
+Currently, Mach integrates drivers from Linux 2.0 through some glue code.
+As these drivers are very old, hardware support is quite limited. We are,
+however, working on using the DDE toolkit to run Linux drivers in userland
+processes, which provides both long-term support for new hardware and
+protection against driver bugs.
diff --git a/hurd/faq/how_to_switch_microkernels.mdwn b/hurd/faq/how_to_switch_microkernels.mdwn
new file mode 100644
index 00000000..468fab54
--- /dev/null
+++ b/hurd/faq/how_to_switch_microkernels.mdwn
@@ -0,0 +1,15 @@
+[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!meta title="How difficult would it be to switch to another microkernel?"]]
+
+One thing that would certainly be needed is rewriting the mach and
+sysdeps/mach parts of glibc and libpthread. Quite a few tools also assume a
+Mach kernel and would have to be rewritten.
diff --git a/hurd/faq/which_microkernel.mdwn b/hurd/faq/which_microkernel.mdwn
new file mode 100644
index 00000000..6180dbbb
--- /dev/null
+++ b/hurd/faq/which_microkernel.mdwn
@@ -0,0 +1,19 @@
+[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!meta title="What happened to the L4/Coyotos/viengoos micro-kernels?"]]
+
+L4 was promising, but turned out not to be suitable for implementing a general-purpose operating system on top of it. See [[history/port_to_l4]].
+
+Coyotos is abandoned upstream.
+
+Neal Walfield started working on a newly designed kernel called [[viengoos|microkernel/viengoos]]. Unfortunately, he currently lacks time, and the project is paused.
+
+In the meantime, people are thus continuing with [[microkernel/mach]].
diff --git a/hurd/running/arch_hurd.mdwn b/hurd/running/arch_hurd.mdwn
index cc2ad0f2..9786d144 100644
--- a/hurd/running/arch_hurd.mdwn
+++ b/hurd/running/arch_hurd.mdwn
@@ -10,14 +10,12 @@ License|/fdl]]."]]"""]]
[[!meta title="Arch Hurd"]]
-<http://www.archhurd.org/>
+Arch Hurd is a port of Arch Linux to the GNU Hurd, founded on 2010-01-04 by Michael Walker (Barrucadu). With input from a variety of people, including Alan McRae (allan), Matthias Lanzinger (melpo), and Alexander Preisinger (giselher), the project has made excellent progress. There is a live CD available on the Arch Hurd website, with which you can try or install Arch Hurd.
-From the website:
+### Links
-*Welcome to the Arch Hurd website. Arch Hurd is a derivative work of Arch Linux porting it to the GNU Hurd system with packages optimised for the i686 architecture.*
-*…*
-*We are attempting to bring the spirit of Arch Linux to the Hurd, and if you'd like to help us achieve that, we'd love to hear from you.*
-
-Status as of 2010-07-31:
-
-* LiveCD with [installation guide](http://wiki.archhurd.org/wiki/Installation_Guide).
+* Official Website: <http://www.archhurd.org>
+* Installation Guide: <http://wiki.archhurd.org/wiki/Installation_Guide>
+* Mailing Lists: <http://lists.archhurd.org>
+* Forum: <http://bbs.archhurd.org>
+* IRC: #archhurd on irc.freenode.net
diff --git a/hurd/running/debian/faq.mdwn b/hurd/running/debian/faq.mdwn
index b3bd230d..8aaadf9c 100644
--- a/hurd/running/debian/faq.mdwn
+++ b/hurd/running/debian/faq.mdwn
@@ -1,4 +1,5 @@
-[[!meta copyright="Copyright © 2007, 2009 Free Software Foundation, Inc."]]
+[[!meta copyright="Copyright © 2007, 2009, 2010 Free Software Foundation,
+Inc."]]
[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
id="license" text="Permission is granted to copy, distribute and/or modify this
@@ -10,8 +11,7 @@ is included in the section entitled
[[!meta title="Debian GNU/Hurd FAQ"]]
-See also the [[Hurd_FAQ|/hurd/FAQ]], [[after_install]], and the [[General FAQ
-About Running GNU/Hurd|/hurd/running/faq]].
+See also [[after_install]] instructions, and other [[/FAQ]].
[[!inline
pages="hurd/running/debian/faq/* and !*/discussion"
diff --git a/hurd/running/faq.mdwn b/hurd/running/faq.mdwn
index a59bce7e..2746a20a 100644
--- a/hurd/running/faq.mdwn
+++ b/hurd/running/faq.mdwn
@@ -1,4 +1,4 @@
-[[!meta copyright="Copyright © 2009 Free Software Foundation, Inc."]]
+[[!meta copyright="Copyright © 2009, 2010 Free Software Foundation, Inc."]]
[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
id="license" text="Permission is granted to copy, distribute and/or modify this
@@ -10,7 +10,7 @@ License|/fdl]]."]]"""]]
[[!meta title="General FAQ About Running GNU/Hurd"]]
-See also the [[Hurd FAQ|hurd/FAQ]], and the [[Debian GNU/Hurd FAQ|debian/faq]].
+See also other [[/FAQ]].
[[!inline
pages="hurd/running/faq/* and !*/discussion"
diff --git a/hurd/translator.mdwn b/hurd/translator.mdwn
index 75020cb2..dd5c5b4d 100644
--- a/hurd/translator.mdwn
+++ b/hurd/translator.mdwn
@@ -78,6 +78,7 @@ Read about translator [[short-circuiting]].
* [[fatfs]]
* [[magic]]
* [[unionfs]]
+* [[nfs]]
* ...
@@ -89,6 +90,9 @@ Read about translator [[short-circuiting]].
* [[tmpfs]]
* [[procfs]]
* [[nsmux]]
+* [[netio]]
+* [[tarfs]]
+* [[gopherfs]]
* ...
# Translators (only) in Hurdextras
@@ -99,13 +103,11 @@ Read about translator [[short-circuiting]].
* [httpfs](http://www.nongnu.org/hurdextras/#httpfs)
* [gopherfs](http://www.nongnu.org/hurdextras/#gopherfs)
* [memfs](http://www.nongnu.org/hurdextras/#memfs)
-* [netio](http://www.nongnu.org/hurdextras/#netio)
* [notice](http://www.nongnu.org/hurdextras/#notice)
* [pith](http://www.nongnu.org/hurdextras/#pith)
* [pptop](http://www.nongnu.org/hurdextras/#pptop)
* [run](http://www.nongnu.org/hurdextras/#run)
* [smbfs](http://www.nongnu.org/hurdextras/#smbfs)
-* [tarfs](http://www.nongnu.org/hurdextras/#tarfs)
* [xmlfs](http://www.nongnu.org/hurdextras/#xmlfs)
* [mboxfs](http://www.nongnu.org/hurdextras/#mboxfs)
diff --git a/hurd/translator/examples.mdwn b/hurd/translator/examples.mdwn
index 6319df77..ee766fbf 100644
--- a/hurd/translator/examples.mdwn
+++ b/hurd/translator/examples.mdwn
@@ -36,9 +36,9 @@ or
ftp$ cd ftp.fr.debian.org
ftp/ftp.fr.debian.org $ ls
-* tarfs translator
+* tarfs translator (needs uatime fix, 2010-08-25 → [git repo](http://github.com/giselher/tarfs))
-You can use tarfs to mount (almost) any tar file:
+You can use tarfs to mount (almost) any tar file (currently broken, 2010-08-25):
$ settrans -ca a /hurd/tarfs -z myfile.tar.gz
$ settrans -ca b /hurd/tarfs -y myfile.tar.bz2
@@ -50,7 +50,7 @@ You can even use it to create new tar files:
$ cp -r all my files new/
$ syncfs new
-This is not as fast as `tar czvf newfile.tar.gz all my files` but at least, it's more original. ;)
+This is not as fast as `tar czvf newfile.tar.gz all my files`, but at least it's more original. ;)
* cvsfs translator
diff --git a/hurd/translator/ext2fs.mdwn b/hurd/translator/ext2fs.mdwn
index 69d035db..305576b8 100644
--- a/hurd/translator/ext2fs.mdwn
+++ b/hurd/translator/ext2fs.mdwn
@@ -9,14 +9,23 @@ Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
is included in the section entitled
[[GNU Free Documentation License|/fdl]]."]]"""]]
-# Issues
+# Large Stores
The `ext2fs` translator from the upstream Hurd code base can only handle file
systems with sizes of less than roughly 2 GiB.
[[!tag open_issue_hurd]]
-A patch exists to lift this limitation (and is being used in the
+## Ognyan's Work
+
+ * Ognyan Kulev, [[*Supporting Larger ext2 File Systems in the
+ Hurd*|ogi-fosdem2005.mgp]], 2005, at FOSDEM
+
+ * Ognyan Kulev, [[large_stores]]
+
+ * <http://kerneltrap.org/node/4429>
+
+Ognyan's patch lifts this limitation (and is being used in the
[[Debian_GNU/Hurd_distribution|running/debian]]), but it introduces another
incompatibility: `ext2fs` then only supports block sizes of 4096 bytes.
Smaller block sizes are commonly automatically selected by `mke2fs` when using
diff --git a/hurd/translator/ext2fs/large_stores.txt b/hurd/translator/ext2fs/large_stores.txt
new file mode 100644
index 00000000..e17a02a5
--- /dev/null
+++ b/hurd/translator/ext2fs/large_stores.txt
@@ -0,0 +1,510 @@
+This is -*- mode: outline -*-
+
+* Introduction
+
+Here is an attempt to describe the ext2fs patch for the Hurd. This
+patch allows using partitions/stores larger than approximately 1.5G
+by not memory-mapping the whole store into the address space.
+
+As a guideline, the changelog of RC1 (Release Candidate 1) is
+followed, so I hope nothing is missed. While writing this text, some
+questions arose; they are marked with XXX. An effort will be made to
+fix all these for RC2.
+
+ Ognyan Kulev <ogi@fmi.uni-sofia.bg>
+
+* The block layer and its purpose
+
+The basic unit of the ext2 filesystem is the "block". All filesystem
+operations work on blocks, which are read, and sometimes modified and
+written back. Possible block sizes are 1K, 2K and 4K, but the
+current implementation works reliably only with 4K blocks (= the page
+size on i386).
+
+So the two basic operations on blocks are "reading" a block and
+"writing" a block.
+
+* Current implementation
+
+** Reading
+
+Currently, the whole store is memory-mapped into the address space of
+the ext2fs process. This is called the "disk image", although "store
+image" would be more accurate. The address of the start of the disk
+image is stored in pager.c:disk_image. So "reading" a block is easy:
+just calculate the byte offset of the block and add it to disk_image.
+The resulting address points to the start of the desired block.
+
+The macro ext2fs.h:bptr has exactly this purpose: given a block
+number, it returns a pointer to the block. Sometimes we have a
+pointer somewhere into a block, and we want the block number. This
+is calculated by ext2fs.h:bptr_block.
+
+There is another set of macros that use byte offsets instead of block
+numbers. These are boffs_ptr (store offset -> memory pointer) and
+bptr_offs (memory pointer -> store offset).
+
+Converting between store offset and block number is easy with macros
+boffs (block -> offset) and boffs_block (offset -> block). Other
+useful macros are trunc_block and round_block.
+
+** Writing
+
+Modifying a block and saving it is not as straightforward as reading.
+For writing, you need to use a "pokel" ("poked elements"). The pokel
+interface is in ext2fs.h; the implementation is in pokel.c.
+
+The problem is that generally multiple blocks are modified and we
+want all these changes to hit the disk at roughly the same time. So
+we can't just change a block and leave the decision of when it's
+going to be written to the microkernel.
+
+So there is a pokel for each set of changes, and each change should
+be reported to the pokel by calling pokel_add. When this set of
+changes is completed, pokel_sync or pokel_flush is called. (The
+latter is used to discard the changes.)
+
+In practice, there is one indir_pokel for each ext2fs.h:disknode,
+which is used for indirect blocks of ext2fs. The only other pokel
+used is ext2fs.h:global_pokel, where all other changes to metadata are
+registered.
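+
+A typical pokel lifecycle, sketched (the exact signatures are
+assumptions based on the usage shown later in this text):
+
+  pokel_add (&global_pokel, bh, block_size);  /* register a change */
+  ...
+  pokel_sync (&global_pokel, 1);              /* write all changes, waiting */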
+
+* Proposed implementation
+
+First, one must realize that the idea of mapping the whole store is
+to be thrown away. So only parts of the store should be mapped.
+These currently mapped parts of the store are collectively called the
+"cache".
+
+In the proposed implementation, the cache has fixed size of
+ext2fs.h:DISK_CACHE_BLOCKS. In RC1, it's 100, but this is only to
+easily catch bugs. In practice, it can be, for example, 512M, or
+(512*1024/4) blocks of 4K. pager.c:disk_cache_size and
+pager.c:disk_cache_blocks are additional variables about that
+information.
+
+The cached blocks are mapped in ext2fs.h:disk_cache and span
+disk_cache_size bytes (= disk_cache_blocks blocks). As in the
+original implementation, this part of address space is handled by
+custom pager.
+
+** Data structures
+
+Blocks in the cache aren't consecutive, so we need a data structure
+recording which part of the address space represents which block.
+This is the purpose of pager.c:disk_cache_info. The index into this
+array is the "cached block index". But this array doesn't help in
+finding whether a specific block is mapped, and where. This is the
+purpose of the pager.c:disk_cache_bptr ihash, which finds the cached
+block index for a given block number. Both data structures are
+guarded by pager.c:disk_cache_lock.
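+
+A sketch of the per-slot record (field names are assumptions; the
+real struct in pager.c may differ):
+
+  struct disk_cache_info
+  {
+    block_t block;      /* which block is mapped at this slot */
+    uint16_t flags;     /* DC_INCORE, DC_REMAPPING, DC_FIXED, ... */
+    uint16_t ref_count;
+  };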
+
+** Public interface
+
+"Public" interface to the cache are functions disk_cache_block_ref,
+disk_cache_block_ref_ptr, disk_cache_block_deref,
+disk_cache_block_is_ref. disk_cache_block_ref takes block number and
+return pointer to block content. Reference count of this cached block
+is incremented. After finishing work with block,
+disk_cache_block_deref should be called.
+
+In converting the original ext2fs code to use these functions, a call
+to bptr is usually turned into a call to disk_cache_block_ref. In
+addition, once the pointer to the block content is no longer used,
+disk_cache_block_deref is called. This simple scheme is only for
+reading from a block. For modifying a block, see the section about
+pokels below.
+
+disk_cache_block_ref_ptr just increments the reference count of the
+specified block. It's used when we give the pointer to the block
+content to somebody else who will dereference it (e.g. a pokel) and
+we want to continue to use this content.
+
+disk_cache_block_is_ref checks whether the specified block has a
+reference count greater than zero. It's used in assertions.
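+
+For example (sketched):
+
+  assert (disk_cache_block_is_ref (block));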
+
+*** bptr* and boffs* macros
+
+These macros continue to work as before, but they don't deal with
+reference counting, and this should be taken into consideration. In
+addition, bptr_index returns the cached block index for a given
+pointer to block content. (This function is used internally.)
+
+*** Pokels
+
+When pokel_add is called with a pointer to block content, this
+"consumes" a reference to the block. It's not consumed (decremented
+by 1) immediately, but when pokel_sync or pokel_flush is called.
+(The reference is consumed immediately if the block is already in the
+pokel. The important thing is that you always lose one reference to
+the block.)
+
+So we have the following code when we read from a block:
+
+ char *bh = disk_cache_block_ref (block);
+ ...
+ disk_cache_block_deref (bh);
+
+And the following code when we modify a block:
+
+ char *bh = disk_cache_block_ref (block);
+ ...
+ pokel_add (pokel, bh, block_size);
+
+**** Indirect calls to pokel_add
+
+Some functions indirectly call pokel_add, so this should be taken into
+consideration. These are:
+
+ * record_global_poke
+ * record_indir_poke
+
+So these functions should be treated in the same way as pokel_add.
+For example:
+
+ char *bh = disk_cache_block_ref (block);
+ ...
+ record_indir_poke (node, bh);
+
+**** Modifying SBLOCK in diskfs_set_hypermetadata
+
+SBLOCK is a global variable that points to the superblock content.
+There is one reference held for the superblock, so before we call
+record_global_poke (which consumes a reference),
+disk_cache_block_ref_ptr is called.
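+
+Sketched (this is the pattern, not literal code):
+
+  disk_cache_block_ref_ptr (sblock);  /* keep our long-lived reference */
+  record_global_poke (sblock);        /* consumes one reference */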
+
+**** Modifying GDP
+
+When a group descriptor is wanted, usually group_desc is called and
+the result is stored in the local variable GDP. After modifying GDP,
+record_global_poke is called. But because record_global_poke
+consumes a reference, we need a call to disk_cache_block_ref_ptr:
+
+ gdp = group_desc (i);
+ ...
+ disk_cache_block_ref_ptr (gdp);
+ record_global_poke (gdp);
+
+*** More complex use of pointer to block content
+
+In the ext2_new_block and ext2_alloc_inode functions, we have a local
+pointer variable BH that sometimes points to block content and
+sometimes points to nothing. In order to reduce possible errors,
+when BH points to nothing it's always 0. At some points (goto
+labels), there is an assertion that BH is what's expected (pointer to
+nothing or pointer to something).
+
+*** dino
+
+The dino function returns a pointer to struct ext2_inode for a given
+ino_t. This takes a reference, so a corresponding
+disk_cache_block_deref should be called after finishing work with the
+ext2_inode. For convenience, dino is renamed to dino_ref, and
+dino_deref just calls disk_cache_block_deref.
+
+ struct ext2_inode *di = dino_ref (np->cache_id);
+ ...
+ dino_deref (di);
+
+Or
+
+ struct ext2_inode *di = dino_ref (np->cache_id);
+ ...
+ sync_global_ptr (di, 1);
+ dino_deref (di);
+
+Or
+
+ struct ext2_inode *di = dino_ref (np->cache_id);
+ ...
+ record_global_poke (di);
+
+* Internals of the proposed implementation
+
+As said earlier, instead of mapping the whole store of the filesystem
+into the address space, only part of it is mapped. This part is
+called the "cache" or "disk cache" (although "store cache" would be
+more appropriate). Currently, the cache is a contiguous area in the
+address space that starts at disk_cache. Its size is
+disk_cache_size, which is disk_cache_blocks blocks of size
+block_size.
+
+Mapped blocks in the disk cache are not fixed -- each block in the
+cache can be replaced at any time with another block. So we need to
+know which blocks are currently cached, and where. Information about
+each cached block is stored in disk_cache_info[]. The index runs
+from 0 to disk_cache_blocks-1. This information includes the block
+number (among some other things, discussed later). The reverse
+direction, getting the index of a cached block from its block number,
+is achieved by using the disk_cache_bptr ihash. Both these data
+structures are guarded by disk_cache_lock.
+
+** Requesting a block
+
+When ext2 code requests a block, it calls disk_cache_block_ref.
+First, this block is looked up in disk_cache_bptr. If it's there,
+the reference count is incremented and a pointer to the block content
+is returned. In this case, there is a call to
+disk_cache_wait_remapping, which is explained a bit later.
+
+It's more interesting when the block is not found in
+disk_cache_bptr. In this case, disk_cache_map is called. Again,
+disk_cache_bptr is consulted, because in the meantime another thread
+could already have mapped this block. If this is the case, the code
+is essentially the same as that in disk_cache_block_ref.
+
+Once it's assured that the block is not in the cache, we have no
+choice but to throw away an already mapped/cached block and put our
+block in its place. Such a block has to meet the following
+conditions:
+
+- Its reference count is 0
+- It is not in core
+- It is not being remapped (explained later)
+- It is not forbidden from being remapped ("fixed", explained later)
+
+The last three conditions correspond to flags in disk_cache_info:
+DC_INCORE, DC_REMAPPING and DC_FIXED. DC_DONT_REUSE collectively
+gives the condition under which a block is not suitable for
+reusing/remapping.
+
+Searching for a suitable place in the cache is linear. As an
+optimisation, this search doesn't start from the beginning, but from
+where it ended last time. This last index is stored in
+disk_cache_hint. So new candidate blocks for replacement are
+searched for in a circular fashion.
+
+If a suitable place is found, the old mapping is removed, and the new
+mapping is initialized. But we are still not ready to return a
+pointer to the block content, because this content is not available
+yet. We mark the block as DC_REMAPPING, which makes
+disk_cache_block_ref for that block in other threads wait until the
+page is completely remapped.
+
+In both cases, whether a suitable place was found or not,
+disk_cache_hint is updated so that the next disk_cache_map continues
+searching from where we ended.
+
+When no suitable place is found, we have to use force. First, all
+pages in the disk cache are touched. This is a workaround for some
+bug in GNU Mach. The patch relies on the "precious page" feature of
+Mach. Marking a page as precious instructs Mach to always inform us
+when it evicts this page. If the page is modified, it seems that we
+are always informed. But if the page is unmodified and evicted,
+sometimes Mach forgets to tell us. It's true that with a large disk
+cache, e.g. 512M, this potentially re-reads the whole cache from
+disk. But if we reach this point, the microkernel is telling us that
+everything is already read :-)
+
+This is preparation for the following calls to pager_return_some.
+This libpager function is called only on cached blocks that have a
+reference count of 0. These are the potential candidates for
+replacement -- there is no sense in calling pager_return_some when
+the reference count is 1 or more. One final case is when there is no
+cached block with a reference count of 0. This is bad and we can't
+do anything about it. In this case, we just wait one second, hoping
+that some other thread will drop the reference count of a block to 0.
+(XXX Currently (in RC1) sleep(1) is always executed. It should be
+executed only when the disk cache is starving. There is some
+rationale behind calling sleep(1) even when the disk cache is not
+starving. Although pager_return_some(,,,1) guarantees that upon
+return of this function the page is returned, I'm not sure that it's
+guaranteed that pager_notify_pageout is called. This is because
+pager_return_some and libpager/data-return.c:_pager_do_write_request
+are executed in different threads and pager_return_some is confirmed
+before calling pager_notify_pageout. This issue is open.)
+
+So, after forcibly evicting all pages (blocks) that can potentially be
+reused, disk_cache_map is called again.
+
+In the case where a suitable place is found and all data structures
+(disk_cache_info and disk_cache_bptr) have been changed accordingly,
+pager_return_some(,,,1) is called and we wait for pager_read_page to
+clear DC_REMAPPING. The purpose of this flag (DC_REMAPPING) is
+solely this: to forbid any use of this block until we are absolutely
+sure that this page contains exactly the wanted block. If NDEBUG is
+not defined (so we include debug code), the flags of the blocks are
+checked to verify that DC_REMAPPING is really cleared.
+
+Is DC_REMAPPING really needed? Is there a possibility that between
+the last "mutex_unlock (&disk_cache_lock)" and "return bptr"
+something could go wrong? Actually, the disk cache just follows the
+protocol set by pager_notify_pageout: between pager_return_some and
+changing the internal structures for the remapping, no thread may
+touch the page. This is achieved by marking the page as
+DC_REMAPPING. For convenience, the function
+disk_cache_wait_remapping is defined, which waits on a cached block
+while it's marked as DC_REMAPPING.
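+
+A sketch of what disk_cache_wait_remapping amounts to (using the
+disk_cache_remapping condition variable mentioned below, and cthreads
+primitives; the details are assumptions):
+
+  while (disk_cache_info[index].flags & DC_REMAPPING)
+    condition_wait (&disk_cache_remapping, &disk_cache_lock);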
+
+XXX XXX: Actually, the sequence used in RC1 is: remap block and
+pager_return_some. The latter seems redundant, as only blocks that
+are evicted are candidates for remapping. I'll try to fix that for
+RC2.
+
+** Modifying blocks and pokels
+
+After a block is modified, it should be registered with pokel_add to
+some pokel. A pokel contains a list of ranges of cached blocks. All
+these blocks should have a reference count of at least 1. In
+pokel_flush and pokel_sync, this reference is consumed.
+
+So in pokel_add, if the added blocks are already in the pokel, their
+references are consumed, because only 1 reference is consumed in
+pokel_{sync,flush}. It's checked whether the pokel is for the
+disk_cache, because pokels are used in file access too, where the
+disk cache layer is not used.
+
+pokel_{flush,sync} both use _pokel_exec, so this is the place where
+block references are consumed. (XXX: In RC1, they are always
+consumed, but it's better to check whether these pages are in the
+disk_cache. Although calling disk_cache_block_deref on a
+non-disk_cache page does no harm.)
+
+*** Indirect use of pokel_add
+
+record_global_poke and record_indir_poke indirectly use pokel_add.
+These functions are slightly changed to use the public interface of
+the disk_cache. The only new precondition added for them is that the
+caller should supply a "reference" that will be consumed later by
+pokel_{flush,sync}.
+
+*** Modifying block without using pokels
+
+sync_global_ptr synchronizes the given block immediately. No
+reference is consumed. (XXX: This should be changed in RC2 to
+consume a reference. That would make the function similar in use to
+record_{global,indir}_poke and would make the code nicer.)
+
+** Initialization
+
+*** The superblock
+
+To create the disk cache, we need the block size of the filesystem.
+This information is in the superblock, so we need to read the
+superblock without using the disk cache. For this purpose,
+get_hypermetadata is changed to read the superblock with store_read
+instead of the old bptr. A new function, map_hypermetadata, is
+created that sets the sblock global variable to point to the already
+mapped superblock. So to get the behavior of the old
+get_hypermetadata, first the new get_hypermetadata should be called,
+and then map_hypermetadata.
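+
+In outline, the startup order is (a sketch, not literal code):
+
+  get_hypermetadata ();   /* read the superblock via store_read */
+  create_disk_pager ();   /* set up the disk cache */
+  map_hypermetadata ();   /* point sblock into the disk cache */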
+
+In ext2fs.c:main, instead of calling get_hypermetadata,
+map_hypermetadata is called. The call to get_hypermetadata is in
+pager.c:create_disk_pager.
+
+In ext2fs.c:diskfs_reload_global_state, along with get_hypermetadata,
+map_hypermetadata is called.
+
+*** disk_cache
+
+Disk cache data structures are initialized in
+pager.c:create_disk_pager, called from ext2fs.c:main. The disk pager
+is still initialized with diskfs_start_disk_pager, but because the
+block_size variable is needed, get_hypermetadata is called first.
+Basic parameters of the disk cache like disk_cache_blocks and
+disk_cache_size are initialized here. The rest of the initialization
+process is delegated to disk_cache_init.
+
+disk_cache_init initializes the rest of the disk cache data
+structures: disk_cache_lock, disk_cache_remapping, disk_cache_bptr,
+disk_cache_info and disk_cache_hint. After that, the superblock and
+group descriptors are mapped into the cache and marked as DC_FIXED.
+This forbids reusing those blocks, because Hurd's ext2 code relies on
+these blocks being mapped at fixed locations in the address space.
+
+** Pager callbacks
+
+disk_pager_read_page and disk_pager_write_page just use disk cache
+data structures to get the right pointers to blocks.
+disk_pager_read_page requests notification of page-out and updates
+DC_INCORE and DC_REMAPPING too. DC_INCORE is set and DC_REMAPPING is
+cleared (because reading the new block finishes its remapping).
+
+disk_pager_notify_pageout just clears DC_INCORE, making that page
+available for remapping.
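+
+Roughly (a sketch; the exact function signature and index calculation
+are assumptions):
+
+  void
+  disk_pager_notify_pageout (struct user_pager_info *upi, vm_offset_t page)
+  {
+    mutex_lock (&disk_cache_lock);
+    disk_cache_info[page >> log2_block_size].flags &= ~DC_INCORE;
+    mutex_unlock (&disk_cache_lock);
+  }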
+
+* libpager changes
+
+Here the memory_object_data_ prefix is shortened to m_o_d_. And when
+an m_o_d_* Mach function is mentioned, usually its libpager handler
+is meant.
+
+** Notification on eviction
+
+The most important change wanted from libpager is support for
+notification when a page is evicted. Mach already has partial
+support for notification on eviction via the "kcopy" argument of
+m_o_d_return. If kcopy is 0, then Mach doesn't have a copy of this
+page anymore, so the page is "evicted". The problem is that
+m_o_d_return is usually called only when a page is modified; if it's
+not modified, it's silently dropped.
+
+The solution is to mark the page as "precious". This has the exact
+semantics we need: when the page is evicted, the m_o_d_return
+callback is always called with kcopy=0.
+
+*** Implementation details
+
+A new argument is added to the user callback pager_read_page:
+notify_on_pageout. If it's non-zero and the page is evicted, the
+user callback pager_notify_pageout(pager,page) is called. This ABI
+change requires all libpager clients in the Hurd to be adapted to the
+new API.
+
+m_o_d_request stores notify_on_pageout as flag PM_NOTIFY_PAGEOUT.
+
+m_o_d_return no longer just skips non-dirty pages. A local array
+notified[] is built, and at the end of the function,
+pager_notify_pageout is called for all pages that are evicted
+(kcopy=0).
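+
+The changed callback interface, sketched (exact parameter names and
+order are assumptions):
+
+  error_t pager_read_page (struct user_pager_info *pager,
+                           vm_offset_t page, vm_address_t *buf,
+                           int *write_lock, int *notify_on_pageout);
+
+  void pager_notify_pageout (struct user_pager_info *pager,
+                             vm_offset_t page);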
+
+** Avoiding libpager optimization
+
+Unfortunately, there is one more problem, this time specific to
+libpager, not Mach. There is an optimization in m_o_d_request when a
+page is being paged out. At the beginning of m_o_d_return, all pages
+being returned are marked as PM_PAGINGOUT. This mark is cleared
+after m_o_d_supply (which supplies the page content to Mach) is
+called. If m_o_d_request is called on a page that is marked as
+PM_PAGINGOUT, this page is marked with PM_PAGEINWAIT, and
+m_o_d_supply inside m_o_d_return is not called for this page. This
+is possible because neither of these functions holds pager->interlock
+during its whole execution. This lock is temporarily unlocked during
+calls to the user callbacks pager_read_page and pager_write_page.
+
+So what is the implication of this optimization for our page eviction
+notification? When a page is paged out, we get notified and we can
+decide to reuse it. After arranging disk_cache_info, etc., the page
+is touched, but if this happens fast enough, the optimization is
+triggered and we get the old content! Reading the page is
+"optimized" away: pager_read_page is not called, and instead the
+content of the old block is used.
+
+This is solved by marking flushed and synced pages (via
+pager_{flush,sync}{,_some}) with PM_FORCEREAD. (These functions call
+lock-object.c:_pager_lock_object, which marks pages with PM_FORCEREAD
+if they are already marked with PM_NOTIFY_PAGEOUT.) In handling
+m_o_d_request, pages marked as PM_FORCEREAD are not optimized in this
+way. XXX: Currently, this fine-grained logic is disabled (with #if),
+as it needs more testing. Probably RC2 will use it. For now, all
+pages are considered PM_FORCEREAD and this particular optimization
+never happens.
+
+*** Technical details
+
+As said above, we need a guarantee that after pager_{sync,flush}*,
+the pager_read_page callback is called. The most convenient place to
+mark these pages as being forced to re-read is
+lock-object.c:_pager_lock_object, because this function is used by
+all pager_{sync,flush}* functions. So there we just mark a page as
+PM_FORCEREAD if it's already marked as PM_NOTIFY_PAGEOUT.
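+
+In _pager_lock_object this amounts to something like (a sketch,
+assuming a per-page flag array pm_entries):
+
+  if (pm_entries[i] & PM_NOTIFY_PAGEOUT)
+    pm_entries[i] |= PM_FORCEREAD;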
+
+First, this mark influences the behaviour of m_o_d_request. If a
+page is marked with PM_FORCEREAD and PM_PAGINGOUT, then we set
+PM_PAGEINWAIT and wait until the related m_o_d_return finishes
+(unmarks PM_PAGEINWAIT). Then we continue with pager_read_page, etc.
+If a page is not marked with PM_FORCEREAD but is marked with
+PM_PAGINGOUT, then the old logic is used and pager_read_page is not
+called (because the m_o_d_return handler will call m_o_d_supply
+instead of us). (XXX: Again, this logic is inside #if 0. Currently,
+all pages are considered as marked with PM_FORCEREAD.)
+
+The other place where PM_FORCEREAD is taken into consideration is the
+handler of m_o_d_return. The original code checks whether a page is
+marked with PM_PAGEINWAIT, and if it is, m_o_d_supply is called for
+the just-written page. PM_PAGEINWAIT is used as a "delegator" of the
+m_o_d_supply call to Mach.
+
+In the patched libpager, there is one more condition for when to call
+m_o_d_supply. It's called when the page is marked as PM_PAGEINWAIT
+and not marked as PM_FORCEREAD. If it's marked as PM_FORCEREAD, then
+we leave m_o_d_supply to the m_o_d_request handler, which gets
+notified via the condition pager->wakeup.
diff --git a/hurd/translator/ext2fs/ogi-fosdem2005.mgp b/hurd/translator/ext2fs/ogi-fosdem2005.mgp
new file mode 100644
index 00000000..27b5077c
--- /dev/null
+++ b/hurd/translator/ext2fs/ogi-fosdem2005.mgp
@@ -0,0 +1,165 @@
+# "Supporting Larger ext2 File Systems in the Hurd"
+# Written by Ognyan Kulev for presentation at FOSDEM 2005.
+# Content of this file is in public domain.
+%include "default.mgp"
+%page
+%nodefault
+%center, font "thick", size 5
+
+
+
+
+Supporting Larger ext2 File Systems in the Hurd
+
+
+
+%font "standard", size 4
+Ognyan Kulev
+%size 3
+<ogi@fmi.uni-sofia.bg>
+
+
+%size 4
+FOSDEM 2005
+
+%page
+
+Need for supporting larger file systems
+
+ Active development during 1995-1997
+
+ Hurd 0.2 was released in 1997 and it was very buggy
+
+ Many bugs are fixed since then
+
+ The 2G limit for ext2 file systems becomes more and more annoying
+
+%page
+
+Timeline
+
+ 2002: Time for graduating, fixing the 2G limit in Hurd's ext2fs and implementing ext3fs were chosen for MSc thesis
+
+ 2003: First alpha-quality patch
+
+ 2004: Graduation, ext2fs patch in Debian, but ext3fs is unstable
+
+%page
+
+User pager in GNU Mach
+
+ Address space
+ memory_object_data_supply
+ memory_object_data_return
+ Memory object (Mach concept)
+ pager_read_page
+ pager_write_page
+ User-supplied backstore (libpager concept)
+
+%page
+
+Current ext2fs
+
+ Memory mapping of the whole store
+
+ Applies only for metadata!
+
+ bptr (block -> data pointer)
+ = image pointer + block * block_size
+
+ Inode and group descriptor tables are used as if they are contiguous in memory
+
+%page
+
+Patched ext2fs, part one
+
+ Address space region
+ mapping
+ Array of buffers
+ association
+ Store
+
+ Association of buffers changes (reassociation)
+
+ It's important for reassociation to occur on buffers that are not in core
+
+%page
+
+Patched ext2fs, part two
+
+ Always use buffer guarded by
+ disk_cache_block_ref (block -> buffer)
+ disk_cache_block_deref (release buffer)
+
+ Buffer = data + reference count + flags (e.g. INCORE)
+
+ Calling some functions implies releasing buffer:
+ pokel_add (pokels are list of dirty buffers)
+ record_global_poke (use pokel_add)
+ sync_global_ptr (sync immediately)
+ record_indir_poke (use pokel_add)
+
+ Use ihash for mapping block to buffer
+
+%page
+
+When unassociated block is requested
+
+
+%font "typewriter", size 4, cont
+retry:
+ i = hint;
+ while (buffers[i] is referenced or in core) {
+ i = (i + 1) % nbuffers;
+ if (i == hint) {
+ return_unreferenced_buffers ();
+ goto retry;
+ }
+ }
+ hint = i + 1;
+
+ deassociate (buffers[i]);
+ associate (buffers[i], block);
+
+ return buffers[i];
+
+%page
+
+Notification for evicted pages
+
+ Notification is essential for optimal reassociation
+
+ Precious pages in Mach
+
+ Slight change to API and ABI of libpager is required
+
+ Mach sometimes doesn't notify!
+
+%page
+
+Pager optimization
+
+1. Mach returns page to pager without leaving it in core
+
+2. Pager becomes unlocked because of calling callback pager_write_page
+
+3. User task touches the page
+
+4. Mach requests the same page from pager
+
+5. XXX Pager supplies the page that was returned by Mach, instead of calling callback pager_read_page
+
+%page
+
+Future directions
+
+ Committing in the Hurd :-)
+ Block sizes of 1K and 2K
+ Run-time option for buffer array size (?)
+ Compile-time option for memory-mapping the whole store
+ Upgrade of UFS
+ Extended attributes (EAs) and Access control lists (ACLs)
+
+# Local Variables:
+# mgp-options: "-g 640x480"
+# End:
diff --git a/hurd/translator/gopherfs.mdwn b/hurd/translator/gopherfs.mdwn
new file mode 100644
index 00000000..6c32430f
--- /dev/null
+++ b/hurd/translator/gopherfs.mdwn
@@ -0,0 +1,16 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+`gopherfs` is a virtual filesystem allowing you to access Gopher sites.
+
+
+# Source
+
+incubator, gopherfs/master
diff --git a/hurd/translator/netio.mdwn b/hurd/translator/netio.mdwn
new file mode 100644
index 00000000..aca9cd69
--- /dev/null
+++ b/hurd/translator/netio.mdwn
@@ -0,0 +1,17 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+`netio` is a translator designed for creating socket ports through the
+filesystem.
+
+
+# Source
+
+incubator, netio/master
diff --git a/hurd/translator/tarfs.mdwn b/hurd/translator/tarfs.mdwn
new file mode 100644
index 00000000..e25e3255
--- /dev/null
+++ b/hurd/translator/tarfs.mdwn
@@ -0,0 +1,25 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+`tarfs` is a translator aimed at providing access to tar files through the
+filesystem. This way you don't have to extract files from the archive to
+access them. It supports compressed archives (bzip2 and gzip) through
+[[libstore]].
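+
+
+# Example
+
+Sketched usage, borrowing the gzip (`-z`) invocation shown on the
+[[examples]] page:
+
+    $ settrans -ca mnt /hurd/tarfs -z myfile.tar.gz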
+
+
+# Status
+
+Works fine in most cases; occasional corruption occurs when writing using
+bzip2/gzip stores.
+
+
+# Source
+
+incubator, tarfs/master
diff --git a/ikiwiki.setup b/ikiwiki.setup
index 89130e2b..2a6e6540 100644
--- a/ikiwiki.setup
+++ b/ikiwiki.setup
@@ -40,7 +40,9 @@ IkiWiki::Setup::Standard->import({
adminuser => [qw{tschwinge}],
# users who are banned from the wiki
banned_users => [qw{AlbertF bernhart ColetCris flamberian jasclaine
+ NateNash
http://calvinyoung.myopenid.com/
+ http://heaton.myopenid.com/
http://hilarybunton.myopenid.com/}],
# where the source of the wiki is located
srcdir => $srcdir,
@@ -57,7 +59,11 @@ IkiWiki::Setup::Standard->import({
# rcs backend to use
rcs => 'git',
# plugins to add to the default configuration
- add_plugins => [qw{goodstuff cutpaste editdiff edittemplate favicon html sidebar table txt copyright license texinfo}],
+ add_plugins => [qw{goodstuff
+ cutpaste editdiff edittemplate favicon getsource
+ html rename repolist search sidebar table txt
+ field getfield ymlfront
+ copyright license texinfo}],
# plugins to disable
disable_plugins => [],
# additional directory to search for template files
@@ -115,12 +121,14 @@ IkiWiki::Setup::Standard->import({
######################################################################
# core plugins
- # (editpage, git, htmlscrubber, inline, link, meta)
+ # (editpage, git, htmlscrubber, inline, link, meta, parentlinks)
######################################################################
# git plugin
# git hook to generate
git_wrapper => $git_wrapper,
+ # shell command for git_wrapper to run, in the background
+ #git_wrapper_background_command => 'git push github',
# mode for git_wrapper (can safely be made suid)
git_wrappermode => '0700',
# git pre-receive hook to generate
@@ -184,6 +192,12 @@ IkiWiki::Setup::Standard->import({
# PageSpec matching users or comment locations to moderate
#moderate_pagespec => '*',
+ # openid plugin
+ # url pattern of openid realm (default is cgiurl)
+ #openid_realm => '',
+ # url to ikiwiki cgi to use for openid authentication (default is cgiurl)
+ #openid_cgiurl => '',
+
# passwordauth plugin
# a password that must be entered when signing up for an account
#account_creation_password => 's3cr1t',
@@ -192,40 +206,31 @@ IkiWiki::Setup::Standard->import({
######################################################################
# format plugins
- # (creole, highlight, hnb, html, mdwn, otl, po, rawhtml, textile,
- # txt)
+ # (creole, highlight, hnb, html, mdwn, otl, rawhtml, textile, txt)
######################################################################
# highlight plugin
# types of source files to syntax highlight
#tohighlight => '.c .h .cpp .pl .py Makefile:make',
+ # location of highlight's filetypes.conf
+ #filetypes_conf => '/etc/highlight/filetypes.conf',
+ # location of highlight's langDefs directory
+ #langdefdir => '/usr/share/highlight/langDefs',
# mdwn plugin
# enable multimarkdown features?
#multimarkdown => 0,
- # po plugin
- # master language (non-PO files)
- #po_master_language => {
- # code => 'en',
- # name => 'English'
- #},
- # slave languages (PO files)
- #po_slave_languages => {
- # de => 'Deutsch',
- # es => 'Español',
- # fr => 'Français'
- #},
- # PageSpec controlling which pages are translatable
- #po_translatable_pages => '* and !*/Discussion',
- # internal linking behavior (default/current/negotiated)
- #po_link_to => 'current',
+ ######################################################################
+ # misc plugins
+ # (filecheck)
+ ######################################################################
######################################################################
# web plugins
- # (attachment, comments, editdiff, edittemplate, getsource,
- # google, mirrorlist, remove, rename, repolist, search,
- # websetup, wmd)
+ # (404, attachment, comments, editdiff, edittemplate, getsource,
+ # google, goto, mirrorlist, remove, rename, repolist, search,
+ # theme, websetup, wmd)
######################################################################
# attachment plugin
@@ -258,12 +263,19 @@ IkiWiki::Setup::Standard->import({
# repolist plugin
# URIs of repositories containing the wiki's source
- #repositories => [qw{svn://svn.example.org/wiki/trunk}],
+ repositories => [qw{git://git.savannah.gnu.org/hurd/web.git
+ http://git.savannah.gnu.org/r/hurd/web.git
+ git://flubber.bddebian.com/~hurd-web/hurd-web
+ http://www.bddebian.com:8888/git/hurd-web}],
# search plugin
# path to the omega cgi program
#omega_cgi => '/usr/lib/cgi-bin/omega/omega',
+ # theme plugin
+ # name of theme to enable
+ #theme => 'actiontabs',
+
# websetup plugin
# list of plugins that cannot be enabled/disabled via the web interface
#websetup_force_plugins => [],
@@ -285,7 +297,7 @@ IkiWiki::Setup::Standard->import({
# base of the archives hierarchy
#archivebase => 'archives',
# PageSpec of pages to include in the archives; used by ikiwiki-calendar command
- #archive_pagespec => 'posts/* and !*/Discussion',
+ #archive_pagespec => 'page(posts/*) and !*/Discussion',
# listdirectives plugin
# directory in srcdir that contains directive descriptions
@@ -308,10 +320,11 @@ IkiWiki::Setup::Standard->import({
######################################################################
# other plugins
# (aggregate, autoindex, brokenlinks, camelcase, ddate, embed,
- # favicon, goodstuff, htmlbalance, localstyle, pagetemplate,
- # pingee, pinger, prettydate, recentchanges, recentchangesdiff,
- # relativedate, rsync, sidebar, smiley, sortnaturally, tag,
- # testpagespec, underlay)
+ # favicon, field, flattr, getfield, goodstuff, htmlbalance,
+ # localstyle, pagetemplate, pingee, pinger, prettydate,
+ # recentchanges, recentchangesdiff, relativedate, rsync,
+ # sidebar, smiley, sortnaturally, tag, testpagespec, underlay,
+ # ymlfront)
######################################################################
# aggregate plugin
@@ -324,6 +337,18 @@ IkiWiki::Setup::Standard->import({
# list of words to not turn into links
#camelcase_ignore => [],
+ # field plugin
+ # simple registration of fields by plugin
+ field_register => {meta => 'last'},
+ # allow config settings to be queried
+ #field_allow_config => 0,
+ # fields flagged as tag-fields
+ #field_tags => {BookAuthor => '/books/authors'},
+
+ # flattr plugin
+ # userid or user name to use by default for Flattr buttons
+ #flattr_userid => 'joeyh',
+
# pinger plugin
# how many seconds to try pinging before timing out
#pinger_timeout => 15,
@@ -355,4 +380,8 @@ IkiWiki::Setup::Standard->import({
# underlay plugin
# extra underlay directories to add
#add_underlays => '',
+
+ # ymlfront plugin
+ # delimiters of YAML data
+ ymlfront_delim => [qw{--YAML-START-- --YAML-END--}],
})
diff --git a/media_appearances.mdwn b/media_appearances.mdwn
index 8fe72752..08b9cd0d 100644
--- a/media_appearances.mdwn
+++ b/media_appearances.mdwn
@@ -16,13 +16,12 @@ A lot of stuff is missing here.
## August
- * DebConf10 presentation (including video) by Michael Banck: [*Debian
- GNU/Hurd -- Past. Present. And
- Future?*](http://penta.debconf.org/dc10_schedule/events/595.en.html)
- ([slides](http://people.debian.org/~mbanck/debian-hurd.pdf)).
+ * DebConf10: {{$community/meetings/debconf10#banck_hurd}}
## July
+ * GNU Hackers Meeting in The Hague: {{$community/meetings/ghm2010#walfield_hurd}}
+
* Koen Vervloesem: [*The Hurd: GNU's quest for the perfect
kernel*](http://lwn.net/Articles/395150/)
diff --git a/microkernel/faq.mdwn b/microkernel/faq.mdwn
index aa98403a..fe259f05 100644
--- a/microkernel/faq.mdwn
+++ b/microkernel/faq.mdwn
@@ -1,4 +1,5 @@
-[[!meta copyright="Copyright © 2008, 2009 Free Software Foundation, Inc."]]
+[[!meta copyright="Copyright © 2008, 2009, 2010 Free Software Foundation,
+Inc."]]
[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
id="license" text="Permission is granted to copy, distribute and/or modify this
@@ -10,6 +11,8 @@ is included in the section entitled
[[!meta title="Microkernel FAQ"]]
+See also other [[/FAQ]].
+
[[!inline
pages="microkernel/faq/* and !*/discussion"
show=0
diff --git a/open_issues/bash_busy-loop.mdwn b/open_issues/bash_busy-loop.mdwn
new file mode 100644
index 00000000..5228ba33
--- /dev/null
+++ b/open_issues/bash_busy-loop.mdwn
@@ -0,0 +1,33 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+I first saw this problem after having had the following command line running
+for a week, or two, or three:
+
+Start `screen`. Find the PID of pfinet.
+
+ $ while sleep 66; do echo "$(date)" " $(ps --no-header --format=hurd -p [PID])"; done | tee ps-pfinet
+
+Leave it running, detach from `screen`.
+
+Eventually, the main `bash` process will go bonkers and eat 100 % CPU time.
+Reproduced on four different systems.
+
+A faster way to reproduce this, again inside `screen`, is to write text to
+the terminal in 10 MiB bursts every three seconds:
+
+ $ while sleep 3; do date > tmp/tmp && yes "$(date)" | dd bs=1M count=10; done
+
+This one only needs about ten hours before `bash` starts its busy-loop, from
+which it can only be terminated with `SIGKILL`. At this point, the `term`,
+`screen`, and `fifo` processes have also used 40, 52, and 25 minutes of CPU
+time, respectively, but appear to still be working fine.
+
+I have not yet started debugging this.
diff --git a/open_issues/crashes_vs_system_load_cpu_load_rpc_load.mdwn b/open_issues/crashes_vs_system_load_cpu_load_rpc_load.mdwn
new file mode 100644
index 00000000..4076d8d0
--- /dev/null
+++ b/open_issues/crashes_vs_system_load_cpu_load_rpc_load.mdwn
@@ -0,0 +1,17 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+IRC, unknown channel, unknown date:
+
+ <antrik> I have a theory
+ <antrik> when the system is under CPU load, the ext2 locking issues are more likely to happen
+ <antrik> I'm under the impression, that when doing something disk-intensive (like a compile job) *considerably* more often causes crashes, when doing *any* other activity in parallel -- be it other compile jobs, or CPU-only activities
+ <antrik> thinking about it, I'm not sure whether CPU-intensive is the decisive criterium, or maybe RPC-intensive...
+ <antrik> CPU load doesn't seem to have any effect -- neither alone, nor in combination with other testcases
diff --git a/open_issues/error_message_disk_full.mdwn b/open_issues/error_message_disk_full.mdwn
new file mode 100644
index 00000000..f72cd66a
--- /dev/null
+++ b/open_issues/error_message_disk_full.mdwn
@@ -0,0 +1,14 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+IRC, unknown channel, unknown date:
+
+ <antrik> /usr/bin/install: writing `/usr/src/gnumach-20060408.dfsg.1/debian/gnumach-dbg/boot/gnumach': (os/kern) memory error
+ <antrik> interesting way to tell that the disk is full ;-)
diff --git a/open_issues/glibc___libc_alloca_cutoff_should_be_lowered.mdwn b/open_issues/glibc___libc_alloca_cutoff_should_be_lowered.mdwn
new file mode 100644
index 00000000..6d1b4bea
--- /dev/null
+++ b/open_issues/glibc___libc_alloca_cutoff_should_be_lowered.mdwn
@@ -0,0 +1,19 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!meta title="glibc: __libc_alloca_cutoff should be lowered"]]
+
+[[!tag open_issue_hurd open_issue_glibc]]
+
+Ognyan Kulev, *[LIBC] \_\_libc\_alloca\_cutoff should be lowered*, bug-hurd,
+2003-06-12, <http://lists.gnu.org/archive/html/bug-hurd/2003-06/msg00050.html>
+
+Replace second link (mail.gnu.org) with
+<http://lists.gnu.org/archive/html/bug-hurd/2002-09/msg00143.html>.
diff --git a/open_issues/hurdextras.mdwn b/open_issues/hurdextras.mdwn
new file mode 100644
index 00000000..fa45a763
--- /dev/null
+++ b/open_issues/hurdextras.mdwn
@@ -0,0 +1,100 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+This is about merging some hurdextras stuff into the Hurd proper repositories.
+
+[[!toc levels=2]]
+
+
+# OK
+
+## cvsfs
+
+/!\ Waiting for <https://savannah.gnu.org/support/?107474>.
+
+ * stesie -- OK
+
+## libfuse
+
+/!\ Waiting for <https://savannah.gnu.org/support/?107474>.
+
+ * stesie -- OK
+
+## mboxfs
+
+Tarball-import, plus trivial changes.
+
+ * Ludovic Courtes -- OK
+ * mmenal -- NOK (but trivial) -- OK
+
+## notice
+
+Tarball-import.
+
+ * Wolfgang Jährling <wolfgang@pro-linux.de> -- OK
+
+## run
+
+Tarball-import.
+
+ * Marcus Brinkmann <marcus@gnu.org> -- OK
+ * Manuel Menal <mmenal@hurdfr.org> -- NOK (but trivial) -- OK
+
+
+# Not Interesting
+
+## procfs
+
+Not interesting anymore, but perhaps import for posterity? Likewise for Neal's
+tarball(s).
+
+
+# Not OK
+
+## httpfs
+
+ * Arun V. <arunsark@yahoo.com> -- NOK
+ * Gopika U. K. <gopika78@yahoo.com> -- NOK
+ * mrphython / James A. Morrison <ja2morri@uwaterloo.ca> -- OK
+
+## jfs
+
+ * Sajith T S <sajith@symonds.net> -- NOK
+ * mmenal / Manuel Menal <mmenal@hurdfr.org> -- NOK
+
+## memfs
+
+ * Farid Hajji <farid.hajji@ob.kamp.net> -- NOK
+ * Ludovic Courtes <ludo@chbouib.org> -- OK
+ * mmenal -- NOK (but trivial) -- OK
+
+## pith
+
+[[tschwinge]] has some tarballs, too.
+
+ * John Tobey <jtobey@john-edwin-tobey.org> -- NOK
+ * Manuel Menal <mmenal@hurdfr.org> -- NOK (but trivial) -- OK
+
+## pptop
+
+ * Miles Bader -- OK
+ * Paul Emsley <paule@chem.gla.ac.uk> -- NOK
+ * James Morrison -- OK
+ * Neal Walfield -- OK
+ * Jon Arney <jarney1@cox.net> -- OK
+ * Alfredo Beaumont Sainz <alfredo.beaumont@gmail.com> -- NOK (but trivial) -- OK
+
+## smbfs
+
+ * rocky\_10\_balboa, gscrivano -- NOK (but has assignments for various other GNU projects)
+
+## xmlfs
+
+ * mmenal -- NOK
diff --git a/open_issues/libgomp_pthread_attr_setstacksize_pthread_stack_min.mdwn b/open_issues/libgomp_pthread_attr_setstacksize_pthread_stack_min.mdwn
new file mode 100644
index 00000000..817dac76
--- /dev/null
+++ b/open_issues/libgomp_pthread_attr_setstacksize_pthread_stack_min.mdwn
@@ -0,0 +1,17 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!tag open_issue_libpthread]]
+
+IRC, unknown channel, unknown date:
+
+ <azeem> neal: libgomp (GNU's implementation of OpenMP) uses PTHREAD_STACK_MIN, which we do not define apparently
+ <neal> azeem: We have fixed sized stacks.
+ <neal> so the pthread_attr_setstacksize will fail once you define PTHREAD_STACK_MIN)
diff --git a/open_issues/libmachuser_libhurduser_rpc_stubs.mdwn b/open_issues/libmachuser_libhurduser_rpc_stubs.mdwn
new file mode 100644
index 00000000..d069641e
--- /dev/null
+++ b/open_issues/libmachuser_libhurduser_rpc_stubs.mdwn
@@ -0,0 +1,26 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+bug-hurd discussion.
+
+---
+
+IRC, #hurd, 2010-08-12
+
+ <jkoenig> Looking at hurd.git, shouldn't {hurd,include}/Makefile's "all" target do something, and shouldn't pretty much everything depend on them? As it stands it seems that the system headers are used and the potentially newer ones never get built, except maybe on "install" (which is seemingly never called from the top-level Makefile)
+ <jkoenig> I would fix it, but something tells me that maybe it's a feature :-)
+ <antrik> jkoenig: the headers are provided by glibc, along with the stubs
+ <jkoenig> antrik, you mean, even those built from the .defs files in hurd/ ?
+ <antrik> yes
+ <jkoenig> oh, ok then.
+ <antrik> as glibc provides the stubs (in libhurduser), the headers also have to come from there, or they would get out of sync
+ <jkoenig> hmm, shouldn't glibc also provide /usr/share/msgids/hurd.msgids, then?
+ <antrik> jkoenig: not necessarily. the msgids describe what the servers actually understand. if the stubs are missing from libhurduser, that's no reason to leave out the msgids...
+ <jkoenig> ok this makes sense
diff --git a/open_issues/nice_vs_mach_thread_priorities.mdwn b/open_issues/nice_vs_mach_thread_priorities.mdwn
index ed0c6155..e6b68134 100644
--- a/open_issues/nice_vs_mach_thread_priorities.mdwn
+++ b/open_issues/nice_vs_mach_thread_priorities.mdwn
@@ -188,6 +188,10 @@ IRC, #hurd, August 2010
<antrik> cfhammar: from the creator thread IIRC
<pochu> yes
+2010-08-12
+
+ <pochu> my plan is to change the number of priority levels and the threads/tasks priority handling, then add new RPCs to play with them and make the old ones stay compatible, then make glibc use the new RPCs
+
---
Another nice issue: [[nice_changes_priority_of_parent_shell]].
diff --git a/open_issues/nptl.mdwn b/open_issues/nptl.mdwn
index daec8b11..9ff5fb51 100644
--- a/open_issues/nptl.mdwn
+++ b/open_issues/nptl.mdwn
@@ -25,3 +25,13 @@ IRC, #hurd, 2010-07-31
<youpi> while the interface between glibc and libpthread isn't increasing _so_ much
<tschwinge> ... and even less so the interfavce that actual applications are using.
<tschwinge> We'd need to evaluate which benefits NPTL would bring.
+
+---
+
+# Resources
+
+ * <http://www.akkadia.org/drepper/nptl-design.pdf>
+
+ * <http://nptltracetool.sourceforge.net/>
+
+ * <http://posixtest.sourceforge.net/>
diff --git a/open_issues/ogi.mdwn b/open_issues/ogi.mdwn
new file mode 100644
index 00000000..e4372dc0
--- /dev/null
+++ b/open_issues/ogi.mdwn
@@ -0,0 +1,25 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+Go through Ognyan Kulev's (ogi) pages, and archive / hunt down what's still
+interesting.
+
+ * <http://debian.fmi.uni-sofia.bg/~ogi/hurd/links/>
+
+ * <http://debian.fmi.uni-sofia.bg/~ogi/hurd/ext3fs/>
+
+ * SVN ext2fs (ext2fs / large stores doc)
+
+ done
+
+ * ext3fs et al.
+
+ checking copyright situation, also for thesis / w.r.t. university
+ project
diff --git a/open_issues/phython.mdwn b/open_issues/phython.mdwn
new file mode 100644
index 00000000..62f70be0
--- /dev/null
+++ b/open_issues/phython.mdwn
@@ -0,0 +1,13 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+Go through James Morrison's (phython) pages, <http://hurd.dyndns.org/> (via
+the Internet Archive Wayback Machine), and archive / hunt down what's still
+interesting.
diff --git a/open_issues/subhurd_error_messages.mdwn b/open_issues/subhurd_error_messages.mdwn
new file mode 100644
index 00000000..46b58fa4
--- /dev/null
+++ b/open_issues/subhurd_error_messages.mdwn
@@ -0,0 +1,15 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!tag open_issue_hurd]]
+
+IRC, unknown channel, unknown date:
+
+ <antrik> BTW, many things in a subhurd print various error messages that are never visible on a normal Hurd...
diff --git a/open_issues/system_crash_nmap.mdwn b/open_issues/system_crash_nmap.mdwn
new file mode 100644
index 00000000..25d9a1c6
--- /dev/null
+++ b/open_issues/system_crash_nmap.mdwn
@@ -0,0 +1,15 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!tag open_issue_gnumach]]
+
+IRC, unknown channel, unknown date:
+
+ <Casper_> Hmm, `nmap hurd -p 1-` seems to reliably make a hurd machine reboot.
diff --git a/open_issues/system_crash_pflocal_fifo.mdwn b/open_issues/system_crash_pflocal_fifo.mdwn
new file mode 100644
index 00000000..1dddc44e
--- /dev/null
+++ b/open_issues/system_crash_pflocal_fifo.mdwn
@@ -0,0 +1,41 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!tag open_issue_gnumach]]
+
+IRC, unknown channel, unknown date:
+
+`cat < /dev/zero | cat > /dev/null` will eventually make the system crash;
+likewise when using a FIFO.
+
+ <antrik> hm... VM activity seems much higher when running fifo than pfinet... may be the cause
+ <antrik> "zero filled" and "page faults" are serveral times higher with pipe than with pfinet
+ <antrik> (cow faults however are about the same...)
+ <antrik> pflocal is about the same as fifo
+
+ <antrik> no, because it usually takes like 20 minutes until it crashes, sometimes much longer
+
+ <antrik> not sure, but the longest so far was in the range of hours IIRC
+
+ <antrik> I think I never tested what happens on "cat /dev/zero >/dev/null"... another thing yet to try
+
+ <antrik> Linux BTW seems to employ some major VM trickery in this case -- dd shows a transfer rate of 10 GB/s...
+
+ <antrik> no, no anomalies in vmstat
+ <antrik> the only observation I made is that number of page faults and some other number rise pretty quickly with pflocal and fifo, but not with pfinet
+ <antrik> I guess that's somehow related to the fact that pfinet doesn't crash -- though I guess the difference is simply that pfinet is way slower...
+ <antrik> (haven't checked that, though)
+
+ <antrik> BTW, I'm not sure you got it right: the test case is "cat /dev/zero|cat >/dev/null", *not* "cat /dev/zero >/dev/null"
+
+ <antrik> OK, "cat /dev/zero|tail -c 1" also crashes, so it's definitely not related to /dev/null
+ <antrik> "dd if=/dev/zero|tail -c 1" crashes as well
+ <antrik> but "tail -c 1 /dev/zero" doesn't seem to
+ <antrik> cool... running multiple instances of the pipe test also considerably speeds up the crash
diff --git a/open_issues/thread-cancel_c_55_hurd_thread_cancel_assertion___spin_lock_locked_ss_critical_section_lock.mdwn b/open_issues/thread-cancel_c_55_hurd_thread_cancel_assertion___spin_lock_locked_ss_critical_section_lock.mdwn
new file mode 100644
index 00000000..72af3f35
--- /dev/null
+++ b/open_issues/thread-cancel_c_55_hurd_thread_cancel_assertion___spin_lock_locked_ss_critical_section_lock.mdwn
@@ -0,0 +1,41 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+[[!meta title="ext2fs.static: thread-cancel.c:55: hurd_thread_cancel: Assertion '! __spin_lock_locked (&ss->critical_section_lock)'"]]
+
+[[!tag open_issue_hurd]]
+
+<http://bugs.debian.org/46859>, <http://bugs.debian.org/195360>
+
+IRC, unknown channel, unknown date:
+
+ <youpi> azeem, marcus: ext2fs.static: thread-cancel.c:55: hurd_thread_cancel: Assertion '! __spin_lock_locked (&ss->critical_section_lock)' failed
+ <youpi> I actually don't understand this assertion
+ <youpi> it's just before __spin_lock (&ss->critical_section_lock);
+ <youpi> why should one check that a lock is free before taking it ?
+ <youpi> just the same in hurdexec.c
+ <youpi> (no, ss is not our own sigstate, so it's not safe to assume no other path can take it)
+ <youpi> there's another one in sysdeps/mach/hurd/spawni.c
+ <youpi> and jmp-unwind.c
+ <antrik> youpi: why do you think it's nonsense?... the fact that we take the lock (so we can't be interrupted) doesn't mean we are willing to wait for others to release the lock... maybe the code path should never be reached while others have a lock, or something
+ <youpi> then it's useless to take the lock
+ <youpi> "we take the lock (so we can't be interrupted)": no, it's not _our_ lock here, it's the lock of the thread we want to cancel
+ <antrik> what exactly is cancelling a thread?... (sorry, I don't really have experience with thread programming)
+ <youpi> ~= killing it
+ <antrik> well, we take the lock so nobody can mess with the thread while we are cancelling it, no?...
+ <youpi> yes
+ <youpi> that is fine
+ <youpi> but checking that the lock is free before taking it doesn't make sense
+ <youpi> why nobody should be able to take the lock ?
+ <youpi> and if nobody is, why do we take it ? (since nobody would be able to take it)
+ <antrik> well, maybe after taking the lock, we do some action that might result in others trying to take it...
+ <youpi> nope: look at the code :)
+ <youpi> or maybe the cancel_hook, but I really doubt it
+
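+To make the discussion easier to follow, this is the shape of the code in
+question, reconstructed from the quotes above -- a sketch, not the verbatim
+glibc source:
+
+    /* Sketch of hurd_thread_cancel (thread-cancel.c), reconstructed
+       from the discussion above.  Note that ss is the sigstate of the
+       thread being cancelled, not our own, so other code paths may
+       well take its locks.  */
+    #include <assert.h>
+    #include <hurd/signal.h>
+
+    error_t
+    hurd_thread_cancel (thread_t thread)
+    {
+      struct hurd_sigstate *ss = _hurd_thread_sigstate (thread);
+
+      /* The questioned assertion: it checks that the lock is free...  */
+      assert (! __spin_lock_locked (&ss->critical_section_lock));
+      /* ...immediately before taking it.  */
+      __spin_lock (&ss->critical_section_lock);
+
+      /* ... mark the thread cancelled, possibly run its cancel hook ... */
+
+      __spin_unlock (&ss->critical_section_lock);
+      return 0;
+    }
+
+Per the discussion, the same assert-then-lock pattern also appears in
+hurdexec.c, sysdeps/mach/hurd/spawni.c, and jmp-unwind.c.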
diff --git a/open_issues/unit_testing.mdwn b/open_issues/unit_testing.mdwn
new file mode 100644
index 00000000..01ed02b3
--- /dev/null
+++ b/open_issues/unit_testing.mdwn
@@ -0,0 +1,43 @@
+[[!meta copyright="Copyright © 2010 Free Software Foundation, Inc."]]
+
+[[!meta license="""[[!toggle id="license" text="GFDL 1.2+"]][[!toggleable
+id="license" text="Permission is granted to copy, distribute and/or modify this
+document under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no Invariant
+Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license
+is included in the section entitled [[GNU Free Documentation
+License|/fdl]]."]]"""]]
+
+A collection of thoughts with respect to unit testing.
+
+We definitely want to add unit test suites to our code base.
+
+We should select a tool that we like to use, and that is actively
+maintained (not abandoned).
+
+ * [DejaGnu](http://www.gnu.org/software/dejagnu/) /
+ [Expect](http://expect.nist.gov/)
+
+ * used by GCC, GDB, binutils, etc.
+
+ * glibc has a home-grown system (Makefile-based)
+
+ * [check](http://check.sourceforge.net/)
+
+  * used by some GNU packages, for example GNU PDF (Jose E. Marchesi);
+    see the example sketch at the end of this page
+
+ * CodeSourcery's [QMTest](http://www.codesourcery.com/qmtest)
+
+  * used by?
+
+ * documentation:
+
+ * <http://www.codesourcery.com/public/qmtest/whitepaper.pdf>
+
+ * <http://www.python.org/workshops/2002-02/papers/01/index.htm>
+
+ * <http://gcc.gnu.org/ml/gcc/2002-05/msg01978.html>
+
+ * <http://www.codesourcery.com/public/qmtest/qmtest-snapshot/share/doc/qmtest/html/tutorial/index.html>
+
+ * <http://www.codesourcery.com/public/qmtest/qmtest-snapshot/share/doc/qmtest/html/manual/index.html>
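+
+To give a concrete idea of one of the candidates, a minimal test with check
+might look like this (a sketch; the suite, test case, and test names are
+made up):
+
+    #include <check.h>
+    #include <stdlib.h>
+
+    START_TEST (test_addition)
+    {
+      ck_assert_int_eq (2 + 2, 4);
+    }
+    END_TEST
+
+    int
+    main (void)
+    {
+      Suite *s = suite_create ("sample");
+      TCase *tc = tcase_create ("core");
+      SRunner *sr;
+      int failed;
+
+      tcase_add_test (tc, test_addition);
+      suite_add_tcase (s, tc);
+
+      sr = srunner_create (s);
+      srunner_run_all (sr, CK_NORMAL);
+      failed = srunner_ntests_failed (sr);
+      srunner_free (sr);
+      return failed == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
+    }
+
+Such a file is typically built with something like
+`gcc test.c $(pkg-config --cflags --libs check)`.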
diff --git a/public_hurd_boxen/installation/snubber.mdwn b/public_hurd_boxen/installation/snubber.mdwn
index 2fd52d4f..68e0d619 100644
--- a/public_hurd_boxen/installation/snubber.mdwn
+++ b/public_hurd_boxen/installation/snubber.mdwn
@@ -10,13 +10,22 @@ License|/fdl]]."]]"""]]
# Additional Packages
- apache2-mpm-worker build-essential git-core gitweb ikiwiki inetutils-inetd
+Before 2010-08-12, we had been using apache2-mpm-worker, but that brought
+the system to its knees too often, leading to an un-syncable rootfs, among
+other problems.  Let's see how apache2-mpm-prefork behaves.
+
+ apache2-mpm-prefork build-essential git-core gitweb ikiwiki inetutils-inetd
less libtext-csv-perl netcat nullmailer perlmagick screen texinfo
Yet more:
* libemail-send-perl (for my *sendmail vs. ikiwiki* patch)
+ * libsearch-xapian-perl xapian-omega (for ikiwiki's search plugin)
+
+ * libyaml-perl (for ikiwiki's YAML field plugins)
+
## [[open_issues/syslog]]
$ find /etc/rc*/ | grep syslog | sudo xargs rm
diff --git a/sidebar.mdwn b/sidebar.mdwn
index 159104fc..d283436b 100644
--- a/sidebar.mdwn
+++ b/sidebar.mdwn
@@ -14,6 +14,7 @@ Welcome to... [[!img /logo/boxes-redrawn.png link=/logo]] ... the GNU Hurd!
* **[[Home|/index]]**
* **[[Community]]**
* **[[Documentation]]**
+ * *[[FAQ]]*
* **[[Getting Help]]**
* **[[Open Issues]]**
diff --git a/user/jkoenig.mdwn b/user/jkoenig.mdwn
index 6045f936..247d61cb 100644
--- a/user/jkoenig.mdwn
+++ b/user/jkoenig.mdwn
@@ -138,7 +138,7 @@ installer kindof works, with documented manual intervention required**
* The segfault will have to be sorted out. (postponed)
* (./) "Fix" the swap situation. (2010-07-08)
- * The device_close() libstore patch
+ * The device\_close() libstore patch
had the unfortunate effect of making swapon fail,
since the device it activates has to be kept open.
* add options for MAKEDEV and setup-devices
@@ -151,7 +151,7 @@ installer kindof works, with documented manual intervention required**
* There was some amount of hurd support already
(namely, activating the interface by replacing the socket translator)
* However, this code started an active translator with
- di_exec_shell_log("settrans -a ...),
+ di\_exec\_shell\_log("settrans -a ...),
which stalled as a consequence of it capturing libdi's pipe
as its standard output.
* Network devices must be probed by trying to open Mach devices
@@ -207,13 +207,13 @@ installer kindof works, with documented manual intervention required**
* Make hurd.postinst not touch them on initial install.
* (./) Fix mach-defpager for file and part stores on larger devices
- * Use DEVICE_GET_RECORDS instead of DEVICE_GET_SIZE, which overflows an int
+	* Use DEVICE\_GET\_RECORDS instead of DEVICE\_GET\_SIZE, which overflows an int (see the illustration below)
(2010-07-22)
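+
+A back-of-the-envelope illustration of that overflow (my numbers, assuming
+512-byte records -- not taken from the patch):
+
+    #include <limits.h>
+    #include <stdio.h>
+
+    int
+    main (void)
+    {
+      /* A size in bytes overflows a 32-bit int as soon as the device
+         exceeds 2 GiB, while the record count stays small.  */
+      long long bytes = 4LL * 1024 * 1024 * 1024;  /* 4 GiB device */
+      long long records = bytes / 512;
+
+      printf ("bytes:   %lld (INT_MAX: %d)\n", bytes, INT_MAX);
+      printf ("records: %lld\n", records);
+      return 0;
+    }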
**Milestone (2010-07-22):
installer works but it's still somewhat ugly and broken**
-* (./) Ship the uft-8 font for the hurd console
+* (./) Ship the UTF-8 font for the hurd console
(2010-07-22)
* Upload a version of bogl with youpi's patch for Hurd.
(see [[!debbug 589987]])
@@ -231,6 +231,37 @@ installer works but it's still somewhat ugly and broken**
* localechooser: set the language display level to 3
when using the hurd console.
+* (./) **busybox**: cross-platform package uploaded to experimental
+ (2010-08-03?)
+ * Aurelien Jarno updated the packaging to busybox 1.17.1,
+ fixed a whole lot of bugs,
+ and uploaded a new package with both our changes;
+ * most patches adopted upstream, and included in the new package;
+ * (u)mount/swaponoff ported to kFreeBSD;
+ * per-OS configuration overrides.
+
+* (./) Update custom packages to the latest versions
+ and send updated patches to the BTS
+ (2010-08-11)
+ * updated partman-base to choose a default filesystem in debian/rules
+ rather than at runtime,
+ as suggested by Aurelien Jarno in [[!debbug 586870]]
+ * patch submitted for debian-installer-utils
+ ([[!debbug 592684]]).
+  * patch submitted for localechooser
+ ([[!debbug 592690]]).
+ * debootstrap, grub-installer and finish-install not yet submitted,
+ since the details may still change.
+
+* (./) **partman-target**: fix fstab creation
+ (2010-08-11)
+ * See [[!debbug 592671]]
+ * debian/rules: set `partman/mount_style` to `traditional` on Hurd.
+ * finish.d/create\_fstab\_header: add a Hurd case.
+
+* **rootskel**: FTBFS on Hurd and other quirks
+ (to be fixed very soon)
+
* **d-i/installer/build**: (expected soon)
* publish the patch I use
* sort out the changes suitable for inclusion
@@ -265,6 +296,7 @@ installer works but it's still somewhat ugly and broken**
though the kFreeBSD people will need them
* **partman**: further adjustments
+ * partman-base: handle /dev/hd?s* in lib/base.h
* hide irrelevant mount options? (sync, relatime)
* Network configuration on the installed system.