[Qemu-devel] SCSI externals [PATCH] revisited

2006-12-12 Thread Chuck Brazie

Let me try this again from Thunderbird:

Here is a patch that merges the externals for IDE and SCSI with a --disk
as Paul requested. Let me know if you want different keywords.

Chuck



diff -Nuar -X diff_excludes /hg-qemu/hw/pc.c /qemu-new/hw/pc.c
--- /hg-qemu/hw/pc.c    2006-10-09 10:30:32.0 -0400
+++ /qemu-new/hw/pc.c   2006-12-04 13:38:50.0 -0500
@@ -710,23 +710,20 @@
 if (i440fx_state) {
 i440fx_init_memory_mappings(i440fx_state);
 }
-#if 0
-/* ??? Need to figure out some way for the user to
-   specify SCSI devices.  */
 if (pci_enabled) {
 void *scsi;
-BlockDriverState *bdrv;
-
-scsi = lsi_scsi_init(pci_bus, -1);
-bdrv = bdrv_new("scsidisk");
-bdrv_open(bdrv, "scsi_disk.img", 0);
-lsi_scsi_attach(scsi, bdrv, -1);
-bdrv = bdrv_new("scsicd");
-bdrv_open(bdrv, "scsi_cd.iso", 0);
-bdrv_set_type_hint(bdrv, BDRV_TYPE_CDROM);
-lsi_scsi_attach(scsi, bdrv, -1);
+if (scsi_hba_lsi > 0) {
+if (!(scsi = lsi_scsi_init(pci_bus, -1))){
+ exit(1);
+}
+for(i = 0; i < MAX_SCSI_DISKS; i++) {
+if (scsi_disks_info[i].adapter == SCSI_LSI_53C895A &&
+scsi_disks_info[i].device_type != SCSI_NONE) {
+lsi_scsi_attach(scsi, bs_scsi_table[i], scsi_disks_info[i].id);
+}
+}
+}
 }
-#endif
 }
 
 static void pc_init_pci(int ram_size, int vga_ram_size, int boot_device,
diff -Nuar -X diff_excludes /hg-qemu/vl.c /qemu-new/vl.c
--- /hg-qemu/vl.c   2006-11-29 14:34:33.0 -0500
+++ /qemu-new/vl.c  2006-12-05 15:12:11.0 -0500
@@ -109,6 +109,8 @@
 /* XXX: use a two level table to limit memory usage */
 #define MAX_IOPORTS 65536
 
+#define DISK_OPTIONS_SIZE 256
+
 const char *bios_dir = CONFIG_QEMU_SHAREDIR;
 char phys_ram_file[1024];
 void *ioport_opaque[MAX_IOPORTS];
@@ -119,6 +121,9 @@
 BlockDriverState *bs_table[MAX_DISKS + 1], *fd_table[MAX_FD];
 /* point to the block driver where the snapshots are managed */
 BlockDriverState *bs_snapshots;
+BlockDriverState *bs_scsi_table[MAX_SCSI_DISKS]; 
+SCSIDiskInfo scsi_disks_info[MAX_SCSI_DISKS];
+int scsi_hba_lsi; /* Count of scsi disks/cdrom using this lsi adapter */
 int vga_ram_size;
 int bios_size;
 static DisplayState display_state;
@@ -3736,7 +3741,175 @@
 term_printf("  %s\n", vc->info_str);
 }
 }
- 
+
+ /* Parse IDE and SCSI disk options */
+static int disk_options_init(int num_ide_disks, 
+ char ide_disk_options[][DISK_OPTIONS_SIZE],
+ int snapshot,
+ int num_scsi_disks,
+ char scsi_disk_options[][DISK_OPTIONS_SIZE],
+ int cdrom_index,
+ int cyls, 
+ int heads, 
+ int secs, 
+ int translation
+ )
+{
+char buf[256];
+char dev_name[64];
+int id, i, j;
+int cdrom_device;
+int ide_cdrom_created = 0;
+int scsi_index;
+scsi_host_adapters temp_adapter;
+
+/* Process any IDE disks/cdroms */
+for (i=0; i < num_ide_disks; i++) {
+
+for (j=0; j<MAX_DISKS; j++) {
+if (ide_disk_options[j][0] == '\0') continue;
+
+if (get_param_value(buf, sizeof(buf),"type",ide_disk_options[j])) {
+if (!strcmp(buf, "disk") ) {
+cdrom_device = 0;
+}else if (!strcmp(buf, "cdrom") ) {
+cdrom_device = 1;
+ide_cdrom_created = 1;
+} else {
+fprintf(stderr, "qemu: invalid IDE disk type= value: %s\n", buf);
+return -1;
+}
+}
+else {
+cdrom_device = 0;
+}
+
+if (cdrom_device) {
+snprintf(dev_name, sizeof(dev_name), "cdrom%c", i);
+}else{
+snprintf(dev_name, sizeof(dev_name), "hd%c", i + 'a');
+}
+
+if (!(get_param_value(buf, sizeof(buf),"img",ide_disk_options[j]) ) ) {
+fprintf(stderr, "qemu: missing IDE disk img= value.\n");
+return -1;
+}
+
+if (!(bs_table[i] = bdrv_new(dev_name) ) ){
+fprintf(stderr, "qemu: unable to create new block device for:%s\n",dev_name);
+return -1;
+}
+
+if (cdrom_device) {
+bdrv_set_type_hint(bs_table[i], BDRV_TYPE_CDROM);
+}
+
+if (bdrv_open(bs_table[i], buf, snapshot ? BDRV_O_SNAPSHOT : 0) < 0) {
+fprintf(stderr, "qemu: could not open hard disk image: '%s'\n",
+buf);
+return -1;
+}
+if (i == 0 && cyls != 0) {
+

Re: [Qemu-devel] SCSI externals [PATCH] revisited

2006-12-12 Thread Hetz Ben Hamo

Chuck,

IMHO, in order to be able to use such patches as yours (big patches),
I think it would be best to attach them as compressed files
(gzip, bz2).
That way, almost any mailer knows how to handle the attachment without
scrambling the patch formatting.

Thanks,
Hetz

On 12/12/06, Chuck Brazie [EMAIL PROTECTED] wrote:

Let me try this again from Thunderbird:

Here is a patch that merges the externals for IDE and SCSI with a --disk
as Paul requested. Let me know if you want different keywords.

Chuck






[Qemu-devel] NBD server for QEMU images

2006-12-12 Thread Salvador Fandiño
Hi,

The patch available from http://qemu-forum.ipi.fi/viewtopic.php?t=2718 adds a 
new utility, qemu-nbds, that implements an NBD server (see http://nbd.sf.net) 
for QEMU images.

Using this utility it is possible to mount images in any format supported by 
QEMU.

Unfortunately, only read access works (locally) due to a limitation in the 
Linux kernel :-(

BTW, only tested on Linux!

Regards,

- Salvador




 





[Qemu-devel] [PATCH] Simplify linux-user/path.c

2006-12-12 Thread Kirill Shutemov

I have no idea why path.c is so complex. Does anyone? A rewritten
version is attached. It has been tested with qemu-arm.

With the old version I had a problem: it hangs due to a loop of symlinks. :(


path.c.patch
Description: Binary data


[Qemu-devel] Linux Kernel to Include KVM Virtualization

2006-12-12 Thread Ricardo Almeida

Hi,

Just saw this on slashdot
(http://linux.slashdot.org/article.pl?sid=06/12/12/0135240). From the
news:

"In a fashion comparable to that of Xen a modified QEMU is used for
the supportive emulation of typical PC components of the virtual
machines"

So, when will it run with a non-modified QEMU? ;)

I've looked at KVM homepage (http://kvm.sourceforge.net/) and it's a
kernel module, so I think this can be some kind of replacement for
KQEmu, no?
Will QEmu/KQEmu support this?

Thanks for the great work,
Ricardo Almeida




Re: [Qemu-devel] NBD server for QEMU images

2006-12-12 Thread Martin Guy

The patch available from http://qemu-forum.ipi.fi/viewtopic.php?t=2718
adds a new utility, qemu-nbds, that implements a NBD server


I have been using nbd volumes mounted from inside qemu for filestore
and for swap, both read-write, served from files and from partitions,
with the unmodified standard nbd-server (debian testing version) for
intensive work and it has been faster and more reliable than NFS (not
that that's saying much).

The only thing that doesn't work is the -swap option, which just
hangs, but that proves not to be necessary when swapping onto nbd host
volume from qemu-land, even when stress-testing it.

What problem is solved by a specially modified nbd server?

   M




Re: [Qemu-devel] Linux Kernel to Include KVM Virtualization

2006-12-12 Thread Oliver Gerlich

Ricardo Almeida wrote:

Hi,

Just saw this on slashdot
(http://linux.slashdot.org/article.pl?sid=06/12/12/0135240). From the
news:

In a fashion comparable to that of Xen a modified QEMU is used for
the supportive emulation of typical PC components of the virtual
machines

So, when it will run with a non-modified QEMU? ;)

I've looked at KVM homepage (http://kvm.sourceforge.net/) and it's a
kernel module, so I think this can be some kind of replacement for
KQEmu, no?
Will QEmu/KQEmu support this?


Isn't KVM more like a kqemu which uses VT/Pacifica so that it runs 
processes a bit more natively? That's how I understood it...


Though I'm wondering why this project apparently didn't announce itself 
on this list, at least for general information. Or, this can also be 
seen as a hint that Qemu is _so_ well-structured and well-documented 
that people are able to use its code without any questions ;-)




Thanks for the great work,
Ricardo Almeida





Regards,
Oliver Gerlich




RE: [Qemu-devel] Linux Kernel to Include KVM Virtualization

2006-12-12 Thread Dor Laor
Hi, 
My name is Dor and I'm one of the contributors for the KVM.

 Ricardo Almeida wrote:
  Hi,
 
  Just saw this on slashdot
  (http://linux.slashdot.org/article.pl?sid=06/12/12/0135240). From
the
  news:
 
  In a fashion comparable to that of Xen a modified QEMU is used for
  the supportive emulation of typical PC components of the virtual
  machines
 
  So, when it will run with a non-modified QEMU? ;)

We'd like to see this happen as soon as possible. The problem is
prioritization, and the important fact that KVM's interface with the
user/Qemu is not yet finalized and changes frequently.
It can be merged with or kept parallel to the Kqemu API, although it won't be
identical.

 
  I've looked at KVM homepage (http://kvm.sourceforge.net/) and it's a
  kernel module, so I think this can be some kind of replacement for
  KQEmu, no?
  Will QEmu/KQEmu support this?
 
 Isn't KVM more like a kqemu which uses VT/Pacifica so that it runs
 processes a bit more native? That's how I understood it...

The differences between the 2 are the following:
- KVM runs the code on the native CPU using VT/Pacifica. This is true
both for user mode code and kernel mode code of the guest.
- KVM cannot run on platforms without VT/Pacifica support, while Kqemu
can.
- Kqemu does not use VT/Pacifica abilities. It runs the user mode
guest code natively, while the kernel mode guest code is emulated within
the kernel.


 Though I'm wondering why this project apparently didn't announce
itself

Sorry for that; some of the Qemu developers did know about the KVM &
Qemu interop. Since the integration between the two hasn't started yet, at
least it is not that late :)

 on this list, at least for general information. Or, this can also be
 seen as a hint that Qemu is _so_ well-structured and well-documented
 that people are able to use its code without any questions ;-)

:)
 
 
  Thanks for the great work,
  Ricardo Almeida
 
 
 
 
 Regards,
 Oliver Gerlich
 
 




[Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Anthony Liguori

Salvador Fandiño wrote:

Hi,

The patch available from http://qemu-forum.ipi.fi/viewtopic.php?t=2718 adds a 
new utility, qemu-nbds, that implements a NBD server (see http://nbd.sf.net) 
for QEMU images.

Using this utility it is posible to mount images in any format supported by 
QEMU.

Unfortunatelly, only read access works (locally) due to a limitation on the 
Linux Kernel :-(


http://hg.codemonkey.ws/qemu-nbd/

And write access works for me.  What's this limitation you speak of?

Regards,

Anthony Liguori


BTW, only tested on Linux!

Regards,

- Salvador




 








[Qemu-devel] Re: SCSI externals [PATCH] revisited

2006-12-12 Thread Anthony Liguori

Hetz Ben Hamo wrote:

Chuck,

IMHO, in order to be able to use such patches as yours (big patches),
I think it would be the best to attached them as compressed files
(gzip, bz2).


Ugh.  It's a text/plain attachment.  No mailers will scramble that.  A 
lot of mailing lists will drop any non text/plain attachments to prevent 
worms, so compressing is probably not a good idea.  If the patch is 
really really big, pointing to an external URL would probably work best.


The problem with the previous patch was Lotus Notes.  Lotus is a bit 
brain dead when it comes to sending attachments (it really sends mail 
via a special format and then converts it to rfc822).  The result is 
that all attachments are application/octet-stream.


Regards,

Anthony Liguori


That way, almost any mailer knows how to handle attachment without
scrambling the patch formatting

Thanks,
Hetz





Re: [Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Paul Brook
On Tuesday 12 December 2006 17:00, Salvador Fandino wrote:
 Martin Guy wrote:
  The patch available from http://qemu-forum.ipi.fi/viewtopic.php?t=2718
  adds a new utility, qemu-nbds, that implements a NBD server
 
  I have been using nbd volumes mounted from inside qemu for filestore
  and for swap, both read-write, served from files and from partitions,
  with the unmodified standard nbd-server (debian testing version) for
  intensive work and it has been faster and more reliable than NFS (not
  that that's saying much).
 
  The only thing that doesn't work is the -swap option, which just
  hangs, but that proves not to be necessary when swapping onto nbd host
  volume from qemu-land, even when stress-testing it.
 
  What problem is solved by a specially modified nbd server?

 It serves disk images in any format QEMU can handle, for instance, qcow
 images.

 It's mostly intended to be used for accessing the files inside QEMU disk
 images locally, without having to launch a virtual machine and accessing
 them from there.

mount -o loop does this.

Paul




Re: [Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Daniel Jacobowitz
On Tue, Dec 12, 2006 at 04:58:32PM +, Paul Brook wrote:
 On Tuesday 12 December 2006 17:00, Salvador Fandino wrote:
  It serves disk images in any format QEMU can handle, for instance, qcow
  images.
 
  It's mostly intended to be used for accessing the files inside QEMU disk
  images locally, without having to launch a virtual machine and accessing
  then from there.
 
 mount -o loop does this.

How is everybody missing the point? :-)  mount -o loop doesn't mount
qcow images.

-- 
Daniel Jacobowitz
CodeSourcery




RE : Re: [Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Sylvain Petreolle
   It's mostly intended to be used for accessing the files inside QEMU disk
   images locally, without having to launch a virtual machine and accessing
   then from there.
  
  mount -o loop does this.
 
 How is everybody missing the point? :-)  mount -o loop doesn't mount
 qcow images.
 
Would it be that difficult to write a qcow fs module?

Kind regards,
Sylvain Petreolle (aka Usurp)
--- --- --- --- --- --- --- --- --- --- --- --- ---
Run your favorite Windows apps with free ReactOS : http://www.reactos.org
Listen to non-DRMised Music: http://www.jamendo.com







Re: [Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Paul Brook
   mount -o loop does this.
 
  How is everybody missing the point? :-)  mount -o loop doesn't mount
  qcow images.

 Would be that difficult to write a qcow fs module ?

qcow is an image format, not a filesystem.
I'd guess it should be possible to use the device-mapper framework to do this. 
I've no idea how hard this is in practice.

Paul




RE : [Qemu-devel] Re: SCSI externals [PATCH] revisited

2006-12-12 Thread Sylvain Petreolle

--- Anthony Liguori [EMAIL PROTECTED] wrote:

 Hetz Ben Hamo wrote:
  Chuck,
  
  IMHO, in order to be able to use such patches as yours (big patches),
  I think it would be the best to attached them as compressed files
  (gzip, bz2).
 
 Ugh.  It's a text plain attachment.  No mailers will scramble that.  A 
 lot of mailing lists will drop any non text/plain attachments to prevent 
 works so compressing is probably not a good idea.  If the patch is 
 really really big, pointing to an external URL would probably work best.
 
I have to disagree here; many mailers do scramble text attachments,
especially with long lines and TABs / special characters.

Kind regards,
Sylvain Petreolle (aka Usurp)
--- --- --- --- --- --- --- --- --- --- --- --- ---
Run your favorite Windows apps with free ReactOS : http://www.reactos.org
Listen to non-DRMised Music: http://www.jamendo.com







Re: RE : Re: [Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Johannes Schindelin
Hi,

On Tue, 12 Dec 2006, Sylvain Petreolle wrote:

It's mostly intended to be used for accessing the files inside QEMU disk
images locally, without having to launch a virtual machine and accessing
then from there.
   
   mount -o loop does this.
  
  How is everybody missing the point? :-)  mount -o loop doesn't mount
  qcow images.
  
 Would be that difficult to write a qcow fs module ?

It would be _more_ difficult. Although I would have done it as a FUSE 
module, just to learn how to do it.

Ciao,
Dscho





Re: RE : Re: [Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Daniel Jacobowitz
On Tue, Dec 12, 2006 at 06:33:22PM +0100, Sylvain Petreolle wrote:
It's mostly intended to be used for accessing the files inside QEMU disk
images locally, without having to launch a virtual machine and accessing
then from there.
   
   mount -o loop does this.
  
  How is everybody missing the point? :-)  mount -o loop doesn't mount
  qcow images.
  
 Would be that difficult to write a qcow fs module ?

Probably not, but I think using nbd for it is much nicer.  I think
there would be trouble with partitionable devices, though.

-- 
Daniel Jacobowitz
CodeSourcery




Re: [Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Mark Williamson
   It's mostly intended to be used for accessing the files inside QEMU
   disk images locally, without having to launch a virtual machine and
   accessing then from there.
 
  mount -o loop does this.

 How is everybody missing the point? :-)  mount -o loop doesn't mount
 qcow images.

Using dm-userspace (a device mapper with mappings generated by a userspace 
daemon instead of a kernel module) I believe it is possible to mount all 
kinds of weird and wonderful things - including things like qcow.

The patches for dm-userspace are floating around, I think on the device mapper 
and Xen developer's mailing lists.

Of course, this is a Linux-specific solution so an NBD server is probably 
still useful (can other OSes mount NBD?  I assume so...?).

In principle, you could use the NBD server to host storage for physical 
machines too, right?  For instance you could opt for a fairly thin setup 
where all user disks are stored separately in qcow format to save space.  
This might be nice for some users of centralised storage systems...

Cheers,
Mark

-- 
Dave: Just a question. What use is a unicyle with no seat?  And no pedals!
Mark: To answer a question with a question: What use is a skateboard?
Dave: Skateboards have wheels.
Mark: My wheel has a wheel!




RE: RE : Re: [Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Paul Robinson
 It's mostly intended to be used for accessing the files inside

 QEMU disk images locally, without having to launch a virtual 
 machine and accessing then from there.

mount -o loop does this.
   
   How is everybody missing the point? :-)  mount -o loop doesn't
mount 
   qcow images.
   
  Would be that difficult to write a qcow fs module ?

 It would be _more_ difficult. Although I would have done it as a FUSE
module, just to learn how to do it.

FUSE can do one half of the job, and you might want to look at
http://www.smallworks.com/~jim/fsimage/ (dated 23-Feb-2005).
It's a program to copy files from various types of disk image (I
haven't tried it).
I found it at
http://www.kidsquid.com/cgi-bin/moin.cgi/FrequentlyAskedQuestions

Cheers,
Paul R.




[Qemu-devel] Re: RE : Re: Re: NBD server for QEMU images

2006-12-12 Thread Salvador Fandino
Daniel Jacobowitz wrote:
 On Tue, Dec 12, 2006 at 06:33:22PM +0100, Sylvain Petreolle wrote:
 It's mostly intended to be used for accessing the files inside QEMU disk
 images locally, without having to launch a virtual machine and accessing
 then from there.
 mount -o loop does this.
 How is everybody missing the point? :-)  mount -o loop doesn't mount
 qcow images.

 Would be that difficult to write a qcow fs module ?
 
 Probably not, but I think using nbd for it is much nicer.  I think
 there would be trouble with partitionable devices, though.

Right now, you can use -o offset and -s size to serve a partition
inside a partitioned disk image. And you can use fdisk or a similar tool
to examine the partition table (they work on /dev/nbd0).

I am also looking for some working code to parse the MBR to incorporate
it into qemu-nbds (something like libparted, but simpler), so it would be
possible to just indicate the partition number to serve.
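
For reference, a sketch of what such MBR parsing can look like, using the
classic 512-byte layout (partition table at byte 446, sixteen bytes per entry,
little-endian LBA start and sector count).  This is an illustration, not code
from qemu-nbds, and list_mbr_partitions() is a made-up name.

#include <stdio.h>
#include <stdint.h>

static uint32_t le32(const uint8_t *p)
{
    return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
}

int list_mbr_partitions(FILE *disk)
{
    uint8_t mbr[512];
    int i;

    if (fread(mbr, 1, sizeof(mbr), disk) != sizeof(mbr))
        return -1;
    if (mbr[510] != 0x55 || mbr[511] != 0xaa)    /* MBR boot signature */
        return -1;

    for (i = 0; i < 4; i++) {
        const uint8_t *e = mbr + 446 + 16 * i;   /* i-th primary entry */
        uint8_t type = e[4];                     /* partition type byte */
        uint32_t start = le32(e + 8);            /* first sector (LBA) */
        uint32_t nsects = le32(e + 12);          /* size in sectors */

        if (type != 0)
            printf("partition %d: type 0x%02x, offset %llu, length %llu bytes\n",
                   i + 1, type,
                   (unsigned long long)start * 512ULL,
                   (unsigned long long)nsects * 512ULL);
    }
    return 0;
}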

- Salva





[Qemu-devel] [PATCH] Fix userland ELF load failure when no .bss is present

2006-12-12 Thread Thiemo Seufer
Hello All,

the appended patch fixes the case where an ELF Linux binary has a
zero-sized .bss, or none at all.


Thiemo


Index: qemu-work/linux-user/elfload.c
===
--- qemu-work.orig/linux-user/elfload.c 2006-12-12 18:25:00.0 +
+++ qemu-work/linux-user/elfload.c  2006-12-12 18:33:08.0 +
@@ -553,10 +553,13 @@
 /* We need to explicitly zero any fractional pages after the data
section (i.e. bss).  This would contain the junk from the file that
should not be in memory. */
-static void padzero(unsigned long elf_bss)
+static void padzero(unsigned long elf_bss, unsigned long last_bss)
 {
 unsigned long nbyte;
 
+   if (elf_bss >= last_bss)
+   return;
+
 /* XXX: this is really a hack : if the real host page size is
smaller than the target page size, some pages after the end
of the file may not be mapped. A better fix would be to
@@ -798,7 +801,7 @@
 * that there are zeromapped pages up to and including the last
 * bss page.
 */
-   padzero(elf_bss);
+   padzero(elf_bss, last_bss);
 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
 
/* Map the last of the bss segment */
@@ -1227,7 +1230,7 @@
sections */
 set_brk(elf_bss, elf_brk);
 
-padzero(elf_bss);
+padzero(elf_bss, elf_brk);
 
 #if 0
 printf("(start_brk) %x\n" , info->start_brk);




[Qemu-devel] Re: RE : Re: Re: NBD server for QEMU images

2006-12-12 Thread Salvador Fandino
Sylvain Petreolle wrote:
 It's mostly intended to be used for accessing the files inside QEMU disk
 images locally, without having to launch a virtual machine and accessing
 then from there.
 mount -o loop does this.
 How is everybody missing the point? :-)  mount -o loop doesn't mount
 qcow images.

 Would be that difficult to write a qcow fs module ?

well, it would mean adapting the qemu disk image handling code to run in
kernel mode (or just reimplementing the required functionality), and
wrapping it inside a block device driver similar to 'loop'.

My solution is much simpler because it's just a small adapter (600 lines
of C) that links with the unmodified qemu source and it runs in user space.
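
As a rough sketch of what such an adapter's core loop can look like
(illustrative only, not the qemu-nbds source; dump_image() is a made-up name
and the include is assumed): the image is opened through qemu's block layer,
so any format block.c understands is handled the same way.

#include <stdio.h>
#include <stdint.h>
#include "vl.h"   /* assumed: the qemu header of that era declaring bdrv_new/bdrv_open/bdrv_read */

/* Copy nb_sectors 512-byte sectors, starting at sector 0, from any image
 * qemu can open (raw, qcow, ...) into a plain file. */
int dump_image(const char *filename, int64_t nb_sectors, FILE *out)
{
    BlockDriverState *bs;
    uint8_t sector[512];
    int64_t i;

    bs = bdrv_new("image");
    if (!bs || bdrv_open(bs, filename, 0) < 0)
        return -1;

    for (i = 0; i < nb_sectors; i++) {
        if (bdrv_read(bs, i, sector, 1) < 0)     /* one sector through the block layer */
            return -1;
        fwrite(sector, 1, sizeof(sector), out);
    }
    return 0;
}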

Cheers,

 - Salva





[Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Anthony Liguori

Paul Brook wrote:

mount -o loop does this.

How is everybody missing the point? :-)  mount -o loop doesn't mount
qcow images.

Would be that difficult to write a qcow fs module ?


qcow is an image format, not a filesystem.
I'd guess it should be possible to use the device-mapper framework to do this. 
I've ho idea how hard this is in practice.


http://wiki.xensource.com/xenwiki/DmUserspace

It will take a little time to get upstream, but it's actively
being worked on.


Regards,

Anthony Liguori


Paul






Re: [Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Christian MICHON

On 12/12/06, Daniel Jacobowitz [EMAIL PROTECTED] wrote:

How is everybody missing the point? :-)  mount -o loop doesn't mount
qcow images.



you could also mount it through a samba tunnel

--
Christian




[Qemu-devel] [PATCH] Check machine type and endianness in softmmu ELF loader

2006-12-12 Thread Thiemo Seufer
Hello All,

the appended patch checks the machine type and the endianness of the
ELF binaries involved. For MIPS it removes the raw binary backward
compatibility mode; recent kernels won't work as raw binaries.

I'm not sure if the machine check is good enough. If it has to check
for more than one EM_* value, or if we want to have a more descriptive
error message, then it might need to become a per-architecture
function hook.
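
A sketch of such a per-architecture hook (names are hypothetical, not existing
qemu API): each target header would provide a predicate instead of a single
ELF_MACHINE value, and elf_ops.h would call it.

/* e.g. in target-mips/cpu.h */
#include <stdint.h>
#include <elf.h>

static inline int cpu_elf_machine_ok(uint16_t e_machine)
{
    /* accept more than one EM_* value if needed */
    return e_machine == EM_MIPS || e_machine == EM_MIPS_RS3_LE;
}

/* and in elf_ops.h, in place of the ELF_MACHINE comparison in the patch
 * below:
 *
 *     if (!cpu_elf_machine_ok(ehdr.e_machine)) {
 *         fprintf(stderr, "ELF machine type does not match this target\n");
 *         goto fail;
 *     }
 */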

Comments?


Thiemo


Index: qemu-work/elf_ops.h
===
--- qemu-work.orig/elf_ops.h    2006-12-07 22:24:45.0 +
+++ qemu-work/elf_ops.h 2006-12-12 18:43:11.0 +
@@ -153,6 +153,9 @@
 glue(bswap_ehdr, SZ)(&ehdr);
 }
 
+if (ELF_MACHINE != ehdr.e_machine)
+goto fail;
+
 if (pentry)
*pentry = (uint64_t)ehdr.e_entry;
 
@@ -164,7 +167,7 @@
 if (!phdr)
 goto fail;
 if (read(fd, phdr, size) != size)
-goto fail;
+goto fail1;
 if (must_swab) {
 for(i = 0; i < ehdr.e_phnum; i++) {
 ph = &phdr[i];
@@ -181,9 +184,9 @@
 data = qemu_mallocz(mem_size);
 if (ph->p_filesz > 0) {
 if (lseek(fd, ph->p_offset, SEEK_SET) < 0)
-goto fail;
+goto fail2;
if (read(fd, data, ph->p_filesz) != ph->p_filesz)
-goto fail;
+goto fail2;
 }
 addr = ph->p_vaddr + virt_to_phys_addend;
 
@@ -197,9 +200,11 @@
 }
 qemu_free(phdr);
 return total_size;
- fail:
+fail2:
 qemu_free(data);
+fail1:
 qemu_free(phdr);
+fail:
 return -1;
 }
 
Index: qemu-work/hw/mips_r4k.c
===
--- qemu-work.orig/hw/mips_r4k.c    2006-12-07 22:24:45.0 +
+++ qemu-work/hw/mips_r4k.c 2006-12-12 18:43:11.0 +
@@ -11,7 +11,6 @@
 
 #define BIOS_FILENAME "mips_bios.bin"
 //#define BIOS_FILENAME "system.bin"
-#define KERNEL_LOAD_ADDR 0x8001
 #define INITRD_LOAD_ADDR 0x8080
 
 #define VIRT_TO_PHYS_ADDEND (-0x8000LL)
@@ -77,14 +76,9 @@
 if (kernel_size >= 0)
 env->PC = entry;
 else {
-kernel_size = load_image(kernel_filename,
- phys_ram_base + KERNEL_LOAD_ADDR + VIRT_TO_PHYS_ADDEND);
-if (kernel_size < 0) {
-fprintf(stderr, "qemu: could not load kernel '%s'\n",
-kernel_filename);
-exit(1);
-}
-env->PC = KERNEL_LOAD_ADDR;
+fprintf(stderr, "qemu: could not load kernel '%s'\n",
+kernel_filename);
+exit(1);
 }
 
 /* load initrd */
Index: qemu-work/loader.c
===
--- qemu-work.orig/loader.c 2006-12-07 22:24:45.0 +
+++ qemu-work/loader.c  2006-12-12 18:43:11.0 +
@@ -197,7 +197,7 @@
 int load_elf(const char *filename, int64_t virt_to_phys_addend,
  uint64_t *pentry)
 {
-int fd, data_order, must_swab, ret;
+int fd, data_order, host_data_order, must_swab, ret;
 uint8_t e_ident[EI_NIDENT];
 
 fd = open(filename, O_RDONLY | O_BINARY);
@@ -218,7 +218,15 @@
 data_order = ELFDATA2LSB;
 #endif
 must_swab = data_order != e_ident[EI_DATA];
-
+
+#ifdef TARGET_WORDS_BIGENDIAN
+host_data_order = ELFDATA2MSB;
+#else
+host_data_order = ELFDATA2LSB;
+#endif
+if (host_data_order != e_ident[EI_DATA])
+return -1;
+
 lseek(fd, 0, SEEK_SET);
 if (e_ident[EI_CLASS] == ELFCLASS64) {
 ret = load_elf64(fd, virt_to_phys_addend, must_swab, pentry);
Index: qemu-work/target-mips/cpu.h
===
--- qemu-work.orig/target-mips/cpu.h    2006-12-12 18:43:10.0 +
+++ qemu-work/target-mips/cpu.h 2006-12-12 19:07:44.0 +
@@ -3,6 +3,8 @@
 
 #define TARGET_HAS_ICE 1
 
+#define ELF_MACHINE EM_MIPS
+
 #include "config.h"
 #include "mips-defs.h"
 #include "cpu-defs.h"
Index: qemu-work/target-arm/cpu.h
===
--- qemu-work.orig/target-arm/cpu.h 2006-12-12 19:02:13.0 +
+++ qemu-work/target-arm/cpu.h  2006-12-12 19:08:45.0 +
@@ -22,6 +22,8 @@
 
 #define TARGET_LONG_BITS 32
 
+#define ELF_MACHINE EM_ARM
+
 #include "cpu-defs.h"
 
 #include "softfloat.h"
Index: qemu-work/target-i386/cpu.h
===
--- qemu-work.orig/target-i386/cpu.h    2006-12-12 19:02:13.0 +
+++ qemu-work/target-i386/cpu.h 2006-12-12 19:08:35.0 +
@@ -36,6 +36,12 @@
 
 #define TARGET_HAS_ICE 1
 
+#ifdef TARGET_X86_64
+#define ELF_MACHINE EM_X86_64
+#else
+#define ELF_MACHINE EM_386
+#endif
+
 #include "cpu-defs.h"
 
 #include "softfloat.h"
Index: qemu-work/target-m68k/cpu.h

[Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Anthony Liguori

Salvador Fandino wrote:

Anthony Liguori wrote:

Salvador Fandiño wrote:

Hi,

The patch available from http://qemu-forum.ipi.fi/viewtopic.php?t=2718
adds a new utility, qemu-nbds, that implements a NBD server (see
http://nbd.sf.net) for QEMU images.

Using this utility it is posible to mount images in any format
supported by QEMU.

Unfortunatelly, only read access works (locally) due to a limitation
on the Linux Kernel :-(

http://hg.codemonkey.ws/qemu-nbd/

And write access works for me.  What's this limitation you speak of?


Mounting a partition being served on the same host as read-write can
cause deadlocks. From nbd-2.9.0 README file:


This text is pretty old.  Is this still valid?  This would imply that 
things like loop can result in deadlocks.  I don't see why flushing one 
device would depend on the completion of another device.  Otherwise, if 
you had two disk adapters, they would always be operating in lock step.


As I've said, I've never seen a problem doing read-write with nbd on localhost.

Regards,

Anthony Liguori


When you write something to a block device, the kernel will not
immediately write that to the physical block device; instead, your
changes are written to a cache, which is periodically flushed by a
kernel thread, 'kblockd'. If you're using a single-processor system,
then you'll have only one kblockd, meaning, the kernel can't write to
more than one block device at the same time.

If, while your kblockd is emptying the NBD buffer cache, the kernel
decides that the cache of the block device your nbd-server is writing to
needs to be emptied, then you've got a deadlock.

Regards,

 - Salva






Re: [Qemu-devel] SCSI externals [PATCH] revisited

2006-12-12 Thread Russell Jackson
On Tue, Dec 12, 2006 at 07:02:33AM -0500, Chuck Brazie wrote:
 
 Here is a patch that merges the externals for IDE and SCSI with a --disk
 as Paul requested. Let me know if you want different keywords.
 

I was able to apply the patch (almost) cleanly to a November snapshot.
The following invocation results in the LSI controller showing up in
Windows XP, but the disk(s) do not. Am I missing something?

qemu -kernel-kqemu -localtime -std-vga \
-disk ide,img=winxp.qcow \
-disk scsi,img=test.qcow

It doesn't seem possible to boot from a SCSI disk currently. In fact,
there's a check in vl.c to see if there is at least one IDE disk that
just dumps the usage output without an error, which was rather confusing
:-P. I'm thinking that the emulated BIOS doesn't support SCSI yet?

-- 
Russell A. Jackson [EMAIL PROTECTED]
Network Analyst
California State University, Bakersfield




Re: [Qemu-devel] SCSI externals [PATCH] revisited

2006-12-12 Thread Daniel Stekloff
On Tue, 2006-12-12 at 15:42 -0800, Russell Jackson wrote:
 On Tue, Dec 12, 2006 at 07:02:33AM -0500, Chuck Brazie wrote:
 
  Here is a patch that merges the externals for IDE and SCSI with a --disk
  as Paul requested. Let me know if you want different keywords.
 
 
 I was able to apply the patch (almost) cleanly to a November snapshot.
 The following invocation results in the LSI controller showing up in
 Windows XP, but the disk(s) do not. Am I missing something?
 
 qemu -kernel-kqemu -localtime -std-vga \
   -disk ide,img=winxp.qcow \
   -disk scsi,img=test.qcow


Is your test.qcow formatted? I had to go into the Windows admin
utilities and format the disk before it became available. 


 It doesn't seem possible to boot from a SCSI disk currently. In fact,
 there's a check in vl.c to see if there is at least one IDE disk that
 just dumps the usage output without an error which was rather confusing
 :-P. I'm thinking that the emulated BIOS doesn't support SCSI yet?


Yep, no support currently for booting off SCSI. 

Thanks,

Dan





Re: [Qemu-devel] SCSI externals [PATCH] revisited

2006-12-12 Thread Russell Jackson
On Tue, Dec 12, 2006 at 03:57:36PM -0800, Daniel Stekloff wrote:
  I was able to apply the patch (almost) cleanly to a November snapshot.
  The following invocation results in the LSI controller showing up in
  Windows XP, but the disk(s) do not. Am I missing something?
  
  qemu -kernel-kqemu -localtime -std-vga \
  -disk ide,img=winxp.qcow \
  -disk scsi,img=test.qcow
 
 
 Is your test.qcow formatted? I had to go into the Windows admin
 utilities and format the disk before it became available. 
 
 

Well that's the problem; I can't format it. SCSI devices don't show up
in the disk management console at all.

-- 
Russell A. Jackson [EMAIL PROTECTED]
Network Analyst
California State University, Bakersfield




Re: [Qemu-devel] Re: NBD server for QEMU images

2006-12-12 Thread Mark Williamson
  And write access works for me.  What's this limitation you speak of?
 
  Mounting a partition being served on the same host as read-write can
  cause deadlocks. From nbd-2.9.0 README file:

 This text is pretty old.  Is this still valid?  This would imply that
 things like loop can result in dead locks.  I don't see why flushing one
 device would depend on the completion of another device.  Otherwise, if
 you had two disk adapters, they would always be operating in lock step.

In the right kind of low memory condition, I guess they might...

 As I've said, I've never seen a problem doing-write with nbd on localhost.

If the NBD device is read-write, this implies it can have associated dirty 
pages.  If you're going to flush those, the kernel is going to have to talk 
to the userspace NBD server.  This is going to require the allocation of 
book-keeping data structures, skbufs, etc and possibly trigger some flushes 
of other dirty data and / or swapping.

I guess you could perhaps get into a loop of needing to flush dirty data to 
make space for data structures needed to flush dirty data?  Which would 
deadlock you quite effectively, but not necessarily be all *that* probable 
under moderate use...

Anybody have any more information on this?

Cheers,
Mark


 Regards,

 Anthony Liguori

  When you write something to a block device, the kernel will not
  immediately write that to the physical block device; instead, your
  changes are written to a cache, which is periodically flushed by a
  kernel thread, 'kblockd'. If you're using a single-processor system,
  then you'll have only one kblockd, meaning, the kernel can't write to
  more than one block device at the same time.
 
  If, while your kblockd is emptying the NBD buffer cache, the kernel
  decides that the cache of the block device your nbd-server is writing to
  needs to be emptied, then you've got a deadlock.
 
  Regards,
 
   - Salva


-- 
Dave: Just a question. What use is a unicyle with no seat?  And no pedals!
Mark: To answer a question with a question: What use is a skateboard?
Dave: Skateboards have wheels.
Mark: My wheel has a wheel!

