( ! ) Deprecated: Function WP_Dependencies-&gt;add_data() was called with an argument that is <strong>deprecated</strong> since version 6.9.0! IE conditional comments are ignored by all supported browsers. in /var/www/html/wp-includes/functions.php on line 6131
Call Stack
#TimeMemoryFunctionLocation
10.0000484224{main}( ).../index.php:0
20.0000484576require( '/var/www/html/wp-blog-header.php ).../index.php:17
30.11414327936require_once( '/var/www/html/wp-includes/template-loader.php ).../wp-blog-header.php:19
40.11514357256include( '/var/www/html/wp-content/themes/twentyfifteen/archive.php ).../template-loader.php:125
50.11514357256get_header( $name = ???, $args = ??? ).../archive.php:19
60.11514357472locate_template( $template_names = [0 => 'header.php'], $load = TRUE, $load_once = TRUE, $args = [] ).../general-template.php:48
70.11514357568load_template( $_template_file = '/var/www/html/wp-content/themes/twentyfifteen/header.php', $load_once = TRUE, $args = [] ).../template.php:749
80.11514358112require_once( '/var/www/html/wp-content/themes/twentyfifteen/header.php ).../template.php:814
90.11524365520wp_head( ).../header.php:18
100.11524365520do_action( $hook_name = 'wp_head' ).../general-template.php:3197
110.11524365736WP_Hook->do_action( $args = [0 => ''] ).../plugin.php:522
120.11524365736WP_Hook->apply_filters( $value = '', $args = [0 => ''] ).../class-wp-hook.php:365
130.11544368720wp_enqueue_scripts( '' ).../class-wp-hook.php:341
140.11544368720do_action( $hook_name = 'wp_enqueue_scripts' ).../script-loader.php:2311
150.11544368936WP_Hook->do_action( $args = [0 => ''] ).../plugin.php:522
160.11544368936WP_Hook->apply_filters( $value = '', $args = [0 => ''] ).../class-wp-hook.php:365
170.11554370824twentyfifteen_scripts( '' ).../class-wp-hook.php:341
180.11574372344wp_style_add_data( $handle = 'twentyfifteen-ie', $key = 'conditional', $value = 'lt IE 9' ).../functions.php:440
190.11574372344WP_Styles->add_data( $handle = 'twentyfifteen-ie', $key = 'conditional', $value = 'lt IE 9' ).../functions.wp-styles.php:245
200.11574372344WP_Dependencies->add_data( $handle = 'twentyfifteen-ie', $key = 'conditional', $value = 'lt IE 9' ).../class-wp-styles.php:385
210.11574491128_deprecated_argument( $function_name = 'WP_Dependencies->add_data()', $version = '6.9.0', $message = 'IE conditional comments are ignored by all supported browsers.' ).../class-wp-dependencies.php:317
220.11574497080wp_trigger_error( $function_name = '', $message = 'Function WP_Dependencies->add_data() was called with an argument that is <strong>deprecated</strong> since version 6.9.0! IE conditional comments are ignored by all supported browsers.', $error_level = 16384 ).../functions.php:5925
230.11584497832trigger_error( $message = 'Function WP_Dependencies-&gt;add_data() was called with an argument that is <strong>deprecated</strong> since version 6.9.0! IE conditional comments are ignored by all supported browsers.', $error_level = 16384 ).../functions.php:6131

( ! ) Deprecated: Function WP_Dependencies-&gt;add_data() was called with an argument that is <strong>deprecated</strong> since version 6.9.0! IE conditional comments are ignored by all supported browsers. in /var/www/html/wp-includes/functions.php on line 6131
Call Stack
#TimeMemoryFunctionLocation
10.0000484224{main}( ).../index.php:0
20.0000484576require( '/var/www/html/wp-blog-header.php ).../index.php:17
30.11414327936require_once( '/var/www/html/wp-includes/template-loader.php ).../wp-blog-header.php:19
40.11514357256include( '/var/www/html/wp-content/themes/twentyfifteen/archive.php ).../template-loader.php:125
50.11514357256get_header( $name = ???, $args = ??? ).../archive.php:19
60.11514357472locate_template( $template_names = [0 => 'header.php'], $load = TRUE, $load_once = TRUE, $args = [] ).../general-template.php:48
70.11514357568load_template( $_template_file = '/var/www/html/wp-content/themes/twentyfifteen/header.php', $load_once = TRUE, $args = [] ).../template.php:749
80.11514358112require_once( '/var/www/html/wp-content/themes/twentyfifteen/header.php ).../template.php:814
90.11524365520wp_head( ).../header.php:18
100.11524365520do_action( $hook_name = 'wp_head' ).../general-template.php:3197
110.11524365736WP_Hook->do_action( $args = [0 => ''] ).../plugin.php:522
120.11524365736WP_Hook->apply_filters( $value = '', $args = [0 => ''] ).../class-wp-hook.php:365
130.11544368720wp_enqueue_scripts( '' ).../class-wp-hook.php:341
140.11544368720do_action( $hook_name = 'wp_enqueue_scripts' ).../script-loader.php:2311
150.11544368936WP_Hook->do_action( $args = [0 => ''] ).../plugin.php:522
160.11544368936WP_Hook->apply_filters( $value = '', $args = [0 => ''] ).../class-wp-hook.php:365
170.11554370824twentyfifteen_scripts( '' ).../class-wp-hook.php:341
180.14214498936wp_style_add_data( $handle = 'twentyfifteen-ie7', $key = 'conditional', $value = 'lt IE 8' ).../functions.php:444
190.14214498936WP_Styles->add_data( $handle = 'twentyfifteen-ie7', $key = 'conditional', $value = 'lt IE 8' ).../functions.wp-styles.php:245
200.14214498936WP_Dependencies->add_data( $handle = 'twentyfifteen-ie7', $key = 'conditional', $value = 'lt IE 8' ).../class-wp-styles.php:385
210.14214498936_deprecated_argument( $function_name = 'WP_Dependencies->add_data()', $version = '6.9.0', $message = 'IE conditional comments are ignored by all supported browsers.' ).../class-wp-dependencies.php:317
220.14214499256wp_trigger_error( $function_name = '', $message = 'Function WP_Dependencies->add_data() was called with an argument that is <strong>deprecated</strong> since version 6.9.0! IE conditional comments are ignored by all supported browsers.', $error_level = 16384 ).../functions.php:5925
230.14224499480trigger_error( $message = 'Function WP_Dependencies-&gt;add_data() was called with an argument that is <strong>deprecated</strong> since version 6.9.0! IE conditional comments are ignored by all supported browsers.', $error_level = 16384 ).../functions.php:6131

MySQL 服务器调优

2007 年 7 月 30 日


如今,开发人员不断地开发和部署使用 LAMP(Linux®、Apache、MySQL 和 PHP/Perl)架构的应用程序。但是,服务器管理员常常对应用程序本身没有什么控制能力,因为应用程序是别人编写的。这份 共三部分的系列文章 将讨论许多服务器配置问题,这些配置会影响应用程序的性能。本文是本系列文章的第三部分,也是最后一部分,将重点讨论为实现最高效率而对数据库层进行的调优。

关于 MySQL 调优


有 3 种方法可以加快 MySQL 服务器的运行速度,效率从低到高依次为:



  1. 替换有问题的硬件。
  2. 对 MySQL 进程的设置进行调优。
  3. 对查询进行优化。

替换有问题的硬件通常是我们的第一考虑,主要原因是数据库会占用大量资源。不过这种解决方案也就仅限于此了。实际上,您通常可以让中央处理器(CPU)或磁盘速度加倍,也可以让内存增大 4 到 8 倍。


第二种方法是对 MySQL 服务器(也称为 mysqld)进行调优。对这个进程进行调优意味着适当地分配内存,并让 mysqld 了解将会承受何种类型的负载。加快磁盘运行速度不如减少所需的磁盘访问次数。类似地,确保 MySQL 进程正确操作就意味着它花费在服务查询上的时间要多于花费在处理后台任务(如处理临时磁盘表或打开和关闭文件)上的时间。对 mysqld 进行调优是本文的重点。


最好的方法是确保查询已经进行了优化。这意味着对表应用了适当的索引,查询是按照可以充分利用 MySQL 功能的方式来编写的。尽管本文并没有包含查询调优方面的内容(很多著作中已经针对这个主题进行了探讨),不过它会配置 mysqld 来报告可能需要进行调优的查询。


虽然已经为这些任务指派了次序,但是仍然要注意硬件和 mysqld 的设置以利于适当地调优查询。机器速度慢也就罢了,我曾经见过速度很快的机器在运行设计良好的查询时由于负载过重而失败,因为 mysqld 被大量繁忙的工作所占用而不能服务查询。





















记录慢速查询


在一个 SQL 服务器中,数据表都是保存在磁盘上的。索引为服务器提供了一种在表中查找特定数据行的方法,而不用搜索整个表。当必须要搜索整个表时,就称为表扫描。通常来说,您可能只希望获得表中数据的一个子集,因此全表扫描会浪费大量的磁盘 I/O,因此也就会浪费大量时间。当必须对数据进行连接时,这个问题就更加复杂了,因为必须要对连接两端的多行数据进行比较。


当然,表扫描并不总是会带来问题;有时读取整个表反而会比从中挑选出一部分数据更加有效(服务器进程中的查询规划器会作出这些决定)。如果索引的使用效率很低,或者根本就不能使用索引,则会减慢查询速度,而且随着服务器上的负载和表大小的增加,这个问题会变得更加显著。执行时间超过给定时间范围的查询就称为慢速查询。


您可以配置 mysqld 将这些慢速查询记录到适当命名的慢速查询日志中。管理员然后会查看这个日志来帮助他们确定应用程序中有哪些部分需要进一步调查。清单 1 给出了要启用慢速查询日志需要在 my.cnf 中所做的配置。


清单 1. 启用 MySQL 慢速查询日志





[mysqld]
; enable the slow query log, default 10 seconds
log-slow-queries
; log queries taking longer than 5 seconds
long_query_time = 5
; log queries that don’t use indexes even if they take less than long_query_time
; MySQL 4.1 and newer only
log-queries-not-using-indexes


这三个设置一起使用,可以记录执行时间超过 5 秒和没有使用索引的查询。请注意有关 log-queries-not-using-indexes 的警告:您必须使用 MySQL 4.1 或更高版本。慢速查询日志都保存在 MySQL 数据目录中,名为 hostname-slow.log。如果希望使用一个不同的名字或路径,可以在 my.cnf 中使用 log-slow-queries = /new/path/to/file 实现此目的。


阅读慢速查询日志最好是通过 mysqldumpslow 命令进行。指定日志文件的路径,就可以看到一个慢速查询的排序后的列表,并且还显示了它们在日志文件中出现的次数。一个非常有用的特性是 mysqldumpslow 在比较结果之前,会删除任何用户指定的数据,因此对同一个查询的不同调用被计为一次;这可以帮助找出需要工作量最多的查询。





















对查询进行缓存


很多 LAMP 应用程序都严重依赖于数据库,但却会反复执行相同的查询。每次执行查询时,数据库都必须要执行相同的工作 —— 对查询进行分析,确定如何执行查询,从磁盘中加载信息,然后将结果返回给客户机。MySQL 有一个特性称为查询缓存,它将(后面会用到的)查询结果保存在内存中。在很多情况下,这会极大地提高性能。不过,问题是查询缓存在默认情况下是禁用的。


将 query_cache_size = 32M 添加到 /etc/my.cnf 中可以启用 32MB 的查询缓存。


监视查询缓存


在启用查询缓存之后,重要的是要理解它是否得到了有效的使用。MySQL 有几个可以查看的变量,可以用来了解缓存中的情况。清单 2 给出了缓存的状态。


清单 2. 显示查询缓存的统计信息





mysql> SHOW STATUS LIKE 'qcache%';
+————————-+————+
| Variable_name | Value |
+————————-+————+
| Qcache_free_blocks | 5216 |
| Qcache_free_memory | 14640664 |
| Qcache_hits | 2581646882 |
| Qcache_inserts | 360210964 |
| Qcache_lowmem_prunes | 281680433 |
| Qcache_not_cached | 79740667 |
| Qcache_queries_in_cache | 16927 |
| Qcache_total_blocks | 47042 |
+————————-+————+
8 rows in set (0.00 sec)


这些项的解释如表 1 所示。


表 1. MySQL 查询缓存变量





























变量名 说明
Qcache_free_blocks 缓存中相邻内存块的个数。数目大说明可能有碎片。FLUSH QUERY CACHE 会对缓存中的碎片进行整理,从而得到一个空闲块。
Qcache_free_memory 缓存中的空闲内存。
Qcache_hits 每次查询在缓存中命中时就增大。
Qcache_inserts 每次插入一个查询时就增大。命中次数除以插入次数就是不中比率;用 1 减去这个值就是命中率。在上面这个例子中,大约有 87% 的查询都在缓存中命中。
Qcache_lowmem_prunes 缓存出现内存不足并且必须要进行清理以便为更多查询提供空间的次数。这个数字最好长时间来看;如果这个数字在不断增长,就表示可能碎片非常严重,或者内存很少。(上面的 free_blocks 和 free_memory 可以告诉您属于哪种情况)。
Qcache_not_cached 不适合进行缓存的查询的数量,通常是由于这些查询不是 SELECT 语句。
Qcache_queries_in_cache 当前缓存的查询(和响应)的数量。
Qcache_total_blocks 缓存中块的数量。

通常,间隔几秒显示这些变量就可以看出区别,这可以帮助确定缓存是否正在有效地使用。运行 FLUSH STATUS 可以重置一些计数器,如果服务器已经运行了一段时间,这会非常有帮助。


使用非常大的查询缓存,期望可以缓存所有东西,这种想法非常诱人。由于 mysqld 必须要对缓存进行维护,例如当内存变得很低时执行剪除,因此服务器可能会在试图管理缓存时而陷入困境。作为一条规则,如果 FLUSH QUERY CACHE 占用了很长时间,那就说明缓存太大了。





















强制限制


您可以在 mysqld 中强制一些限制来确保系统负载不会导致资源耗尽的情况出现。清单 3 给出了 my.cnf 中与资源有关的一些重要设置。


清单 3. MySQL 资源设置





set-variable=max_connections=500
set-variable=wait_timeout=10
max_connect_errors = 100


连接最大个数是在第一行中进行管理的。与 Apache 中的 MaxClients 类似,其想法是确保只建立服务允许数目的连接。要确定服务器上目前建立过的最大连接数,请执行 SHOW STATUS LIKE 'max_used_connections'。


第 2 行告诉 mysqld 终止所有空闲时间超过 10 秒的连接。在 LAMP 应用程序中,连接数据库的时间通常就是 Web 服务器处理请求所花费的时间。有时候,如果负载过重,连接会挂起,并且会占用连接表空间。如果有多个交互用户或使用了到数据库的持久连接,那么将这个值设低一点并不可取!


最后一行是一个安全的方法。如果一个主机在连接到服务器时有问题,并重试很多次后放弃,那么这个主机就会被锁定,直到 FLUSH HOSTS 之后才能运行。默认情况下,10 次失败就足以导致锁定了。将这个值修改为 100 会给服务器足够的时间来从问题中恢复。如果重试 100 次都无法建立连接,那么使用再高的值也不会有太多帮助,可能它根本就无法连接。





















缓冲区和缓存


MySQL 支持超过 100 个的可调节设置;但是幸运的是,掌握少数几个就可以满足大部分需要。查找这些设置的正确值可以通过 SHOW STATUS 命令查看状态变量,从中可以确定 mysqld 的运作情况是否符合我们的预期。给缓冲区和缓存分配的内存不能超过系统中的现有内存,因此调优通常都需要进行一些妥协。


MySQL 可调节设置可以应用于整个 mysqld 进程,也可以应用于单个客户机会话。


服务器端的设置


每个表都可以表示为磁盘上的一个文件,必须先打开,后读取。为了加快从文件中读取数据的过程,mysqld 对这些打开文件进行了缓存,其最大数目由 /etc/mysqld.conf 中的 table_cache 指定。清单 4 给出了显示与打开表有关的活动的方式。


清单 4. 显示打开表的活动





mysql> SHOW STATUS LIKE 'open%tables';
+—————+——-+
| Variable_name | Value |
+—————+——-+
| Open_tables | 5000 |
| Opened_tables | 195 |
+—————+——-+
2 rows in set (0.00 sec)


清单 4 说明目前有 5,000 个表是打开的,有 195 个表需要打开,因为现在缓存中已经没有可用文件描述符了(由于统计信息在前面已经清除了,因此可能会存在 5,000 个打开表中只有 195 个打开记录的情况)。如果 Opened_tables 随着重新运行 SHOW STATUS 命令快速增加,就说明缓存命中率不够。如果 Open_tables 比 table_cache 设置小很多,就说明该值太大了(不过有空间可以增长总不是什么坏事)。例如,使用 table_cache = 5000 可以调整表的缓存。


与表的缓存类似,对于线程来说也有一个缓存。 mysqld 在接收连接时会根据需要生成线程。在一个连接变化很快的繁忙服务器上,对线程进行缓存便于以后使用可以加快最初的连接。


清单 5 显示如何确定是否缓存了足够的线程。


清单 5. 显示线程使用统计信息





mysql> SHOW STATUS LIKE 'threads%';
+——————-+——–+
| Variable_name | Value |
+——————-+——–+
| Threads_cached | 27 |
| Threads_connected | 15 |
| Threads_created | 838610 |
| Threads_running | 3 |
+——————-+——–+
4 rows in set (0.00 sec)


此处重要的值是 Threads_created,每次 mysqld 需要创建一个新线程时,这个值都会增加。如果这个数字在连续执行 SHOW STATUS 命令时快速增加,就应该尝试增大线程缓存。例如,可以在 my.cnf 中使用 thread_cache = 40 来实现此目的。


关键字缓冲区保存了 MyISAM 表的索引块。理想情况下,对于这些块的请求应该来自于内存,而不是来自于磁盘。清单 6 显示了如何确定有多少块是从磁盘中读取的,以及有多少块是从内存中读取的。


清单 6. 确定关键字效率





mysql> show status like '%key_read%';
+——————-+———–+
| Variable_name | Value |
+——————-+———–+
| Key_read_requests | 163554268 |
| Key_reads | 98247 |
+——————-+———–+
2 rows in set (0.00 sec)


Key_reads 代表命中磁盘的请求个数, Key_read_requests 是总数。命中磁盘的读请求数除以读请求总数就是不中比率 —— 在本例中每 1,000 个请求,大约有 0.6 个没有命中内存。如果每 1,000 个请求中命中磁盘的数目超过 1 个,就应该考虑增大关键字缓冲区了。例如,key_buffer = 384M 会将缓冲区设置为 384MB。


临时表可以在更高级的查询中使用,其中数据在进一步进行处理(例如 GROUP BY 字句)之前,都必须先保存到临时表中;理想情况下,在内存中创建临时表。但是如果临时表变得太大,就需要写入磁盘中。清单 7 给出了与临时表创建有关的统计信息。


清单 7. 确定临时表的使用





mysql> SHOW STATUS LIKE 'created_tmp%';
+————————-+——-+
| Variable_name | Value |
+————————-+——-+
| Created_tmp_disk_tables | 30660 |
| Created_tmp_files | 2 |
| Created_tmp_tables | 32912 |
+————————-+——-+
3 rows in set (0.00 sec)


每次使用临时表都会增大 Created_tmp_tables;基于磁盘的表也会增大 Created_tmp_disk_tables。对于这个比率,并没有什么严格的规则,因为这依赖于所涉及的查询。长时间观察 Created_tmp_disk_tables 会显示所创建的磁盘表的比率,您可以确定设置的效率。 tmp_table_size 和 max_heap_table_size 都可以控制临时表的最大大小,因此请确保在 my.cnf 中对这两个值都进行了设置。


每个会话的设置


下面这些设置针对于每个会话。在设置这些数字时要十分谨慎,因为它们在乘以可能存在的连接数时候,这些选项表示大量的内存!您可以通过代码修改会话中的这些数字,或者在 my.cnf 中为所有会话修改这些设置。


当 MySQL 必须要进行排序时,就会在从磁盘上读取数据时分配一个排序缓冲区来存放这些数据行。如果要排序的数据太大,那么数据就必须保存到磁盘上的临时文件中,并再次进行排序。如果 sort_merge_passes 状态变量很大,这就指示了磁盘的活动情况。清单 8 给出了一些与排序相关的状态计数器信息。


清单 8. 显示排序统计信息





mysql> SHOW STATUS LIKE 'sort%';
+——————-+———+
| Variable_name | Value |
+——————-+———+
| Sort_merge_passes | 1 |
| Sort_range | 79192 |
| Sort_rows | 2066532 |
| Sort_scan | 44006 |
+——————-+———+
4 rows in set (0.00 sec)


如果 sort_merge_passes 很大,就表示需要注意 sort_buffer_size。例如, sort_buffer_size = 4M 将排序缓冲区设置为 4MB。


MySQL 也会分配一些内存来读取表。理想情况下,索引提供了足够多的信息,可以只读入所需要的行,但是有时候查询(设计不佳或数据本性使然)需要读取表中大量数据。要理解这种行为,需要知道运行了多少个 SELECT 语句,以及需要读取表中的下一行数据的次数(而不是通过索引直接访问)。实现这种功能的命令如清单 9 所示。


清单 9. 确定表扫描比率





mysql> SHOW STATUS LIKE 'com_select';
+—————+——–+
| Variable_name | Value |
+—————+——–+
| Com_select | 318243 |
+—————+——–+
1 row in set (0.00 sec)

mysql> SHOW STATUS LIKE 'handler_read_rnd_next';
+———————–+———–+
| Variable_name | Value |
+———————–+———–+
| Handler_read_rnd_next | 165959471 |
+———————–+———–+
1 row in set (0.00 sec)



Handler_read_rnd_next / Com_select 得出了表扫描比率 —— 在本例中是 521:1。如果该值超过 4000,就应该查看 read_buffer_size,例如 read_buffer_size = 4M。如果这个数字超过了 8M,就应该与开发人员讨论一下对这些查询进行调优了!





















3 个必不可少的工具


尽管在了解具体设置时,SHOW STATUS 命令会非常有用,但是您还需要一些工具来解释 mysqld 所提供的大量数据。我发现有 3 个工具是必不可少的;在 参考资料 一节中您可以找到相应的链接。


大部分系统管理员都非常熟悉 top 命令,它为任务所消耗的 CPU 和内存提供了一个不断更新的视图。 mytop 对 top 进行了仿真;它为所有连接上的客户机以及它们正在运行的查询提供了一个视图。mytop 还提供了一个有关关键字缓冲区和查询缓存效率的实时数据和历史数据,以及有关正在运行的查询的统计信息。这是一个很有用的工具,可以查看系统中(比如 10 秒钟之内)的状况,您可以获得有关服务器健康信息的视图,并显示导致问题的任何连接。


mysqlard 是一个连接到 MySQL 服务器上的守护程序,负责每 5 分钟搜集一次数据,并将它们存储到后台的一个 Round Robin Database 中。有一个 Web 页面会显示这些数据,例如表缓存的使用情况、关键字效率、连接上的客户机以及临时表的使用情况。尽管 mytop 提供了服务器健康信息的快照,但是 mysqlard 则提供了长期的健康信息。作为奖励,mysqlard 使用自己搜集到的一些信息针对如何对服务器进行调优给出一些建议。


搜集 SHOW STATUS 信息的另外一个工具是 mysqlreport。其报告要远比 mysqlard 更加复杂,因为需要对服务器的每个方面都进行分析。这是对服务器进行调优的一个非常好的工具,因为它对状态变量进行适当计算来帮助确定需要修正哪些问题。



 

本文介绍了对 MySQL 进行调优的一些基础知识,并对这个针对 LAMP 组件进行调优的 3 部分系列文章进行了总结。调优很大程度上需要理解组件的工作原理,确定它们是否正常工作,进行一些调整,并重新评测。每个组件 —— Linux、Apache、PHP 或 MySQL —— 都有各种各样的需求。分别理解各个组件可以帮助减少可能会导致应用程序速度变慢的瓶颈。

 


参考资料

学习


获得产品和技术


  • 尽管已经出版了 3 年之久了, High Performance MySQL 仍然是非常有价值的一本书。作者也有一个 Web 站点介绍 有关 MySQL 的各种文章


  • mytop 告诉您目前 MySQL 服务器上都在进行什么操作,并提供一些关键的统计信息。在发现数据库有问题时,应该首先求助于这个程序。


  • mysqlard 会给出 MySQL 服务器一个关键性能指示器的图形表示,并给出一些调优建议。


  • mysqlreport 是一个必须的工具。它为您分析 SHOW STATUS 变量。


  • MySQL 文章如果没有提供到 phpMyAdmin 的链接,就说不上完整。尽管已经给出了对状态变量的一些解释,但是这个产品的强大之处在于如何简化管理任务。


  • 定购 SEK for Linux,共包含两张 DVD,其中有用于 Linux 的最新 IBM 试用软件,包括 DB2®、Lotus®、Rational®、Tivoli® 和 WebSphere®。


  • 利用可直接从 developerWorks 下载的 IBM 试用软件 在 Linux 上构建您的下一个开发项目。


讨论

jetty 使用手記

jetty resin tomcat 測試報告:http://www.strongd.net/blog/show/255


       一直以來使用jetty作為我的web開發配置服務器,開始的時候和所有的初學者一様使用tomcat作為開發服務器,可用着用着,感覺tomcat越來越繁瑣以及龐大。後來,用了jboss,知道jboss使用jetty作為其web應用服務器,所以就開始試着使用jetty。從那開始後,jetty就成為我的開發配置服務器了,從最初的4.0,到現在的6.0一直在使用着。

  
喜歡jetty的原因,在于其的方便,簡單的配置文件,簡單的啓動脚本,而且在elipse或者其他ide中,進行調試,運行都很方便。

  
不想多説什麽,讓事實來説話吧。在開始之前,先下載jetty。目前最新的版本為6.1:
   http://docs.codehaus.org/display/JETTY/Downloading+and+Installing#download

  
以前的版本文件是比較小的,現在的版本增加了很多東東,主要是很多例子應用,以及源碼,經過删减只有,整個應用還是比較小的,大概也就10m多了。如果只是需要運行web應用,并且只是需要jsp 2.1規範的話,只有7m多了。以前4.0的時候只有2m多,現在也已經增加了那麽多了,時代在進步,没有辦法。

   jetty
主要的 jar 有:jetty-6.1.1.jar、servlet-api-2.5-6.1.1.jar、jetty-util-6.1.1.jar。啓動的 jar 為 start.jar。還有 jsp 規範的 jar(jsp 2.1),好像已經减了不少的 jar 了,只有 4 個文件:core-3.1.1.jar、ant-1.6.5.jar、jsp-2.1.jar、jsp-api-2.1.jar。core 是使用 eclipse 的 jdt,進行 jsp 編譯。

   jetty
的主要配置文件為etc/jetty.xml,當然你可以自己指定彆的文件。在start.jar中有個start.config文件是默認的環境配置,以及指定默認的配置文件。可以手工替换。

  
啓動jetty很簡單,在命令行下面java -jar start.jar
  
如果需要指定start.config,使用java -DSTART=start.config -jar start.jar
  
配置web 應用也非常的簡單:
  
更改jetty.xml就行了,增加web應用的方式包括,直接放置應用在webapps下面,或者配置以下的context

  



<New id="Mywork" class="org.mortbay.jetty.webapp.WebAppContext">
      
<Arg><Ref id="Contexts"/></Arg>
      
<!-- 
絶對路徑,可以指定相對路徑,增加 <SystemProperty name="jetty.home" default="."/> 就行-->
      
<Arg>d:/workspace/strong/web/</Arg>
      
<Arg>/mywork</Arg>
      
<Set name="defaultsDescriptor"><SystemProperty name="jetty.home" default="."/>/etc/webdefault.xml</Set>
      
<Set name="virtualHosts">
        
<Array type="java.lang.String">
          
<Item>www.strongd.net</Item>
        
</Array>
      
</Set>
      
    
</New>




要想改變原先的webapps主應用,改變下面的配置


<Call class="org.mortbay.jetty.webapp.WebAppContext" name="addWebApplications">
      
<Arg><Ref id="Contexts"/></Arg>
      
<Arg><SystemProperty name="jetty.home" default="."/>/webapps</Arg>
      
<Arg><SystemProperty name="jetty.home" default="."/>/etc/webdefault.xml</Arg>
      
<Arg type="boolean">True</Arg>  <!-- extract -->
      
<Arg type="boolean">False</Arg> <!-- parent priority class loading -->
 
</Call>




默認的web.xml配置文件為webdefault.xml
如果想配置相應的web參數,可以更改其應用。

默認的端口為8080,如果想修改,更改:jetty.port屬性



    
<Call name="addConnector">
      
<Arg>
          
<New class="org.mortbay.jetty.nio.SelectChannelConnector">
            
<Set name="port"><SystemProperty name="jetty.port" default="8080"/></Set>
            
<Set name="maxIdleTime">30000</Set>
            
<Set name="Acceptors">2</Set>
            
<Set name="confidentialPort">8443</Set>
          
</New>
      
</Arg>
    
</Call>





簡單的配置,簡單的啓動,下一篇,我會介紹,如何在eclipse中使用jetty.




resin vs jetty


服務器:redhat as 4 2.6.9-22.ELsmp
           Intel(R) Pentium(R) D CPU 2.80GHz
           2G記憶體
           160G SATA


客戶機:WINXP SP2
           Intel(R) Pentium(r) 4 CPU 2.93GHz
           1G記憶體
            80G IDE硬碟


測試軟體:Load Runner 7.8


並發用戶數: 500


測試代碼:


<%@ page language="java" import="java.util.*" pageEncoding="ISO-8859-1"%>
<%
String path = request.getContextPath();
String basePath = request.getScheme()+"://"+request.getServerName()+":"+request.getServerPort()+path+"/";


HashMap m = new HashMap();
for(int i=0;i<10000;i++)
 m.put(i,i);
m.clear();
m = null;
%>


<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
  <head>
    <base href="<%=basePath%>">
   
    <title>My JSP 'test.jsp' starting page</title>
   
 <meta http-equiv="pragma" content="no-cache">
 <meta http-equiv="cache-control" content="no-cache">
 <meta http-equiv="expires" content="0">   
 <meta http-equiv="keywords" content="keyword1,keyword2,keyword3">
 <meta http-equiv="description" content="This is my page">
 <!--
 <link rel="stylesheet" type="text/css" href="styles.css">
 -->


  </head>
 
  <body>
    This is my JSP page. <%=basePath%><br>
  </body>
</html>


測試結果:


jetty 6.1.5







 分析摘要 周期: 30-09-2007 10:20:45 – 30-09-2007 10:21:09












方案名: Scenario1
会话的结果文件: e:\Temp\res\res.lrr
持续时间: 24秒.





 统计信息摘要 

























  最大运行 Vuser 数: 500
  总吞吐量(字节): 439,733
  平均吞吐量(字节/秒): 17,589
  总点击次数: 500
  平均每秒点击次数: 20   查看 HTTP 响应摘要





 事务摘要 











  事务: 通过总数: 2,000 失败总数: 0 停止总数: 0          平均响应时间





















































事务名 最小值 平均值 最大值 标准偏差 90% 通过 失败 停止
index 0.634 2.209 3.847 0.856 3.283 500 0 0






 HTTP 响应摘要 













HTTP 响应 总计 每秒
HTTP_200 500 20


 

 

 

Resin pro 3.0.23







 分析摘要 周期: 30-09-2007 10:23:44 – 30-09-2007 10:24:15












方案名: Scenario1
会话的结果文件: e:\Temp\res\res.lrr
持续时间: 31秒.





 统计信息摘要 

























  最大运行 Vuser 数: 500
  总吞吐量(字节): 428,380
  平均吞吐量(字节/秒): 13,387
  总点击次数: 500
  平均每秒点击次数: 15.625   查看 HTTP 响应摘要





 事务摘要 











  事务: 通过总数: 2,000 失败总数: 0 停止总数: 0          平均响应时间





















































事务名 最小值 平均值 最大值 标准偏差 90% 通过 失败 停止
index 0.652 6.722 11.39 4.065 10.882 500 0 0






 HTTP 响应摘要 













HTTP 响应 总计 每秒
HTTP_200 500 15.625


 

總結:

      雖然這次測試比較簡單.但也應該可以體現出jetty性能比resin好一點.而且resin還購買了正版的licenses,沒有把tomcat加入測試.有時間再測試一個tomcat吧

支援藍光燒錄機的免費燒錄軟體:AVS Disc Creator

 軟體:AVS Disc Creator(版本:2.1.5.100)
類別:燒錄程式
性質:Freeware(5.6 M)

【編輯/高啟唐】

AVS Disc Creator是一個免費的光碟燒錄軟體,不但支援CD、DVD光碟燒錄機,就連最新的藍光燒錄機也都支援了,使用者完全不必擔心相容性上的問題。

所有常用的燒錄功能,AVS Disc Creator可說是全具備了,像是資料光碟、影片光碟、MP3光碟與製作光碟映像檔等功能,AVS Disc Creator都可以燒錄,功能不比其他付費燒錄軟體遜色。

另外,AVS Disc Creator還支援抹除可複寫式光碟與專案排程等燒錄輔助功能。還在煩惱找不到合用且免費的燒錄軟體嗎?建議你來試試AVS Disc Creator!


下載:http://www.avsmedia.com/download/AVSDiscCreator.exe

Jetty vs. Tomcat vs. Resin: A Performance Comparison

This morning, I did some comparisons between Jetty 5.1.5rc1, Tomcat 5.5.9 and Resin 3.0.14 (OS version). I ran AppFuse’s “test-canoo” target, which tests all the JSPs using Canoo WebTest. I did this as a Servlet 2.4 application, and had to tweak some stuff in my web.xml to make it work on Jetty and Resin. Nothing big, just stuff that Tomcat let pass through and these servers didn’t. One interesting thing to note that Resin requires you to use “http://java.sun.com/jstl/fmt” for JSTL’s “fmt” tag URI, while Jetty and Tomcat require “http://java.sun.com/jstl/fmt_rt”. This is with Resin’s “fast-jstl” turned off – b/c everything blows up if it’s turned on (I don’t feel like coding my JSTL to Resin’s standards, that’s why I turn it off).

Below is a list of the average time it took to run “test-canoo” after I ran it once to compile all the JSPs.


  • Jetty: 19 seconds
  • Tomcat: 19 seconds
  • Resin: 29 seconds

In addition, I tested how long it took for each server to startup – including the initialization of AppFuse.



  • Jetty: 7 seconds
  • Tomcat: 8 seconds
  • Resin: 13 seconds

So what does all this mean? A number of things:



  • I need to clean up AppFuse’s web.xml a bit for 2.4 applications.
  • Putting the database connection pool configuration in a Spring context file (vs. JNDI) makes AppFuse much more portable.
  • Jetty isn’t as fast as Jetty-lovers say it is (or maybe Tomcat just caught up).
  • The open source version of Resin is much slower than the other open source servlet containers.
  • I should restructure the build.xml to pull out Tomcat stuff and allow users to specify server deployment settings (i.e. in a ${servername}.xml file).
  • Orion still doesn’t support the Servlet 2.4 or JSP 2.0 specifications.

Generate not repeat random numbers


/**
 * Generates a sequence of distinct ("non-repeating") random numbers.
 *
 * <p>Fixes applied to the original blog snippet:
 * <ul>
 *   <li>{@code for(int i=;i<100;i++)} had a missing loop initializer and the
 *       println used typographic quotes — the fragment did not compile.</li>
 *   <li>The dedup list was re-created on every loop iteration, so previously
 *       drawn numbers were never remembered and duplicates were NOT prevented;
 *       the {@code loopcontrol > 100} bail-out then added a duplicate anyway.</li>
 *   <li>{@code (int) Math.round(Math.random() * (bound - 1))} gives the two
 *       endpoint values only half the probability of the others;
 *       {@link java.util.Random#nextInt(int)} is uniform.</li>
 * </ul>
 */
public class UniqueRandoms {

    /**
     * Returns {@code count} distinct random integers in the range {@code [0, bound)}.
     *
     * @param count  how many distinct values to draw; must be {@code <= bound}
     * @param bound  exclusive upper bound of the value range
     * @param random source of randomness (injected so callers/tests can seed it)
     * @return a list of {@code count} distinct values, in the order drawn
     * @throws IllegalArgumentException if more distinct values are requested
     *         than the range can supply
     */
    public static List<Integer> generate(int count, int bound, Random random) {
        if (count > bound) {
            throw new IllegalArgumentException(
                "cannot draw " + count + " distinct values from [0, " + bound + ")");
        }
        // Set gives O(1) duplicate checks; the list preserves draw order.
        Set<Integer> seen = new HashSet<>(count);
        List<Integer> result = new ArrayList<>(count);
        while (result.size() < count) {
            int candidate = random.nextInt(bound); // uniform over [0, bound)
            if (seen.add(candidate)) {             // add() is false for repeats
                result.add(candidate);
            }
        }
        return result;
    }

    /** Reproduces the original snippet's output: 100 distinct numbers in [0, 100). */
    public static void main(String[] args) {
        List<Integer> numbers = generate(100, 100, new Random());
        for (int i = 0; i < numbers.size(); i++) {
            System.out.println("The random number for " + i + " is:" + numbers.get(i));
        }
    }
}

只給出代碼片段,

先隨機出一個數,保存在ArrayList,再隨機出第二個數,看看是否存在於ArrayList中,如果存在,再重新隨機第二個數.如此類推..直到完成100個隨機數為止!

支援多種格式的免費映像檔燒錄軟體:ImgBurn

 

隨著燒錄機的普及,網路上越來越多大型檔案都改以映像檔格式傳輸;不過映像檔下載回來後需要燒成光碟才能讀取,是否有支援多種格式而且還是免費的映像檔燒錄軟體呢?

來試試ImgBurn吧!ImgBurn正是一套免費的映像檔燒錄軟體,它不但可以燒錄ISO、MDS、BIN、DI、DVD、GI、IMG、NRG、 PDI、CDI、CDR、GCM、IBQ、LST、VDI等市面上最常見的映像檔,還支援CD、DVD、HD DVD、Blu-ray諸多格式喔!

除了能燒錄映像檔,ImgBurn還具備製作映像檔的功能,雖然只能製作ISO格式,但比起一堆需付費的同類型軟體已經強太多了!而ImgBurn的介面簡明易懂,體積小不會佔用太多電腦資源,對於有燒錄映像檔需求的人來說,絕對是不可多得的好幫手!

 

Speeding up Linux Using hdparm

Are you running an Intel Linux system with at least one (E)IDE hard drive?

Wouldn’t it be neat if there were a magical command to instantly double the I/O performance of your disks? Or, in some cases, show 6 to 10 times your existing throughput?


Did you ever just wonder how to tell what kind of performance you’re getting on your “tricked-out” Linux box?


Don’t overlook hdparm(8). If you’ve never heard of it, don’t worry. Most people I’ve talked to haven’t either. But if you’re running an IDE/Linux system (as many folks are,) you’ll wonder how you ever got this far without it. I know I did.


What’s the big deal?


So, you’ve got your brand-new UltraATA/66 EIDE drive with a screaming brand-new controller chipset that supports multiple PIO modes and DMA and the leather seat option and extra chrome… But is your system actually taking advantage of these snazzy features? The hdparm(8) command will not only tell you how your drives are performing, but will let you tweak them out to your heart’s content.


Now before you get too excited, it is worth pointing out that under some circumstances, these commands CAN CAUSE UNEXPECTED DATA CORRUPTION! Use them at your own risk! At the very least, back up your box and bring it down to single-user mode before proceeding.


With the usual disclaimer out of the way, I’d like to point out that if you are using current hardware (i.e. your drive AND controller AND motherboard were manufactured in the last two or three years), you are at considerably lower risk. I’ve used these commands on several boxes with various hardware configurations, and the worst I’ve seen happen is the occasional hang, with no data problems on reboot. And no matter how much you might whine at me and the world in general for your personal misfortune, we all know who is ultimately responsible for the well-being of YOUR box: YOU ARE. Caveat Fair Reader.


Now, then. If I haven’t scared you away yet, try this (as root, preferably in single-user mode):

hdparm -Tt /dev/hda

You’ll see something like:

/dev/hda:
Timing buffer-cache reads: 128 MB in 1.34 seconds =95.52 MB/sec
Timing buffered disk reads: 64 MB in 17.86 seconds = 3.58 MB/sec

What does this tell us? The -T means to test the cache system (i.e., the memory, CPU, and buffer cache). The -t means to report stats on the disk in question, reading data not in the cache. The two together, run a couple of times in a row in single-user mode, will give you an idea of the performance of your disk I/O system. (These are actual numbers from a PII/350 / 128M Ram / newish EIDE HD; your numbers will vary.)


But even with varying numbers, 3.58 MB/sec is PATHETIC for the above hardware. I thought the ad for the HD said something about 66MB per second!!?!? What gives?


Well, let’s find out more about how Linux is addressing your drive:

hdparm /dev/hda

/dev/hda:
multcount = 0 (off)
I/O support = 0 (default 16-bit)
unmaskirq = 0 (off)
using_dma = 0 (off)
keepsettings = 0 (off)
nowerr = 0 (off)
readonly = 0 (off)
readahead = 8 (on)
geometry = 1870/255/63, sectors = 30043440, start = 0


These are the defaults. Nice, safe, but not necessarily optimal. What’s all this about 16-bit mode? I thought that went out with the 386! And why are most of the other options turned off?


Well, it’s generally considered a good idea for any self-respecting distribution to install itself in the kewlest, slickest, but SAFEST way it possibly can. The above settings are virtually guaranteed to work on any hardware you might throw at it. But since we know we’re throwing something more than a dusty, 8-year-old, 16-bit multi-IO card at it, let’s talk about the interesting options:



  • multcount: Short for multiple sector count. This controls how many sectors are fetched from the disk in a single I/O interrupt. Almost all modern IDE drives support this. The man page claims:


    When this feature is enabled, it typically reduces operating system overhead for disk I/O by 30-50%. On many systems, it also provides increased data throughput of anywhere from 5% to 50%.

  • I/O support: This is a big one. This flag controls how data is passed from the PCI bus to the controller. Almost all modern controller chipsets support mode 3, or 32-bit mode w/sync. Some even support 32-bit async. Turning this on will almost certainly double your throughput (see below.)


  • unmaskirq: Turning this on will allow Linux to unmask other interrupts while processing a disk interrupt. What does that mean? It lets Linux attend to other interrupt-related tasks (i.e., network traffic) while waiting for your disk to return with the data it asked for. It should improve overall system response time, but be warned: Not all hardware configurations will be able to handle it. See the manpage.


  • using_dma: DMA can be a tricky business. If you can get your controller and drive using a DMA mode, do it. But I have seen more than one machine hang while playing with this option. Again, see the manpage (and the example on the next page)!

 


Turbocharged


So, since we have our system in single-user mode like a good little admin, let’s try out some turbo settings:



hdparm -c3 -m16 /dev/hda

/dev/hda:
setting 32-bit I/O support flag to 3
setting multcount to 16
multcount = 16 (on)
I/O support = 3 (32-bit w/sync)


Great! 32-bit sounds nice. And some multi-reads might work. Let’s re-run the benchmark:

hdparm -tT /dev/hda


/dev/hda:
Timing buffer-cache reads: 128 MB in 1.41 seconds =90.78 MB/sec
Timing buffered disk reads: 64 MB in 9.84 seconds = 6.50 MB/sec


WOW! Almost double the disk throughput without really trying! Incredible.


But wait, there’s more: We’re still not unmasking interrupts, using DMA, or even a using decent PIO mode! Of course, enabling these gets riskier. (Why is it always a trade-off between freedom and security?) The man page mentions trying Multiword DMA mode2, so:

hdparm -X34 -d1 -u1 /dev/hda

…Unfortunately this seems to be unsupported on this particular box (it hung like an NT box running a Java app.) So, after rebooting it (again in single-user mode), I went with this:

hdparm -X66 -d1 -u1 -m16 -c3 /dev/hda

/dev/hda:
setting 32-bit I/O support flag to 3
setting multcount to 16
setting unmaskirq to 1 (on)
setting using_dma to 1 (on)
setting xfermode to 66 (UltraDMA mode2)
multcount = 16 (on)
I/O support = 3 (32-bit w/sync)
unmaskirq = 1 (on)
using_dma = 1 (on)


And then checked:

hdparm -tT /dev/hda

/dev/hda:
Timing buffer-cache reads: 128 MB in 1.43 seconds =89.51 MB/sec
Timing buffered disk reads: 64 MB in 3.18 seconds =20.13 MB/sec


20.13 MB/sec. A far cry from the miniscule 3.58 we started with…


By the way, notice how we specified the -m16 and -c3 switch again? That’s because it doesn’t remember your hdparm settings between reboots. Be sure to add the above line (not the test line with -tT flags!) to your /etc/rc.d/* scripts once you’re sure the system is stable (and preferably after your fsck runs; having an extensive fs check run with your controller in a flaky mode may be a good way to generate vast quantities of entropy, but it’s no way to administer a system. At least not with a straight face…)


Now, after running the benchmark a few more times, reboot in multi-user mode and fire up X. Load Netscape. And try not to fall out of your chair.


In conclusion


This is one of those interesting little tidbits that escapes many “seasoned” Linux veterans, especially since one never sees any indication that the system isn’t using the most optimal settings. (Gee, all my kernel messages have looked fine….) And using hdparm isn’t completely without risk, but is well worth investigating.


And it doesn’t stop at performance: hdparm lets you adjust various power saving modes as well. See the hdparm(8) for the final word.


Many thanks to Mark Lord for putting together this nifty utility. If your particular distribution doesn’t include hdparm (usually in /sbin or /usr/sbin), get it from the source at http://metalab.unc.edu/pub/Linux/system/hardware/


Happy hacking!

The For-Each Loop

Iterating over a collection is uglier than it needs to be. Consider the following method, which takes a collection of timer tasks and cancels them:


    // Cancels every TimerTask in c using an explicit Iterator (the
    // "old" pre-for-each style the article is contrasting against).
    void cancelAll(Collection<TimerTask> c) {
        for (Iterator<TimerTask> i = c.iterator(); i.hasNext(); )
            i.next().cancel();
    }


The iterator is just clutter. Furthermore, it is an opportunity for error. The iterator variable occurs three times in each loop: that is two chances to get it wrong. The for-each construct gets rid of the clutter and the opportunity for error. Here is how the example looks with the for-each construct:


    // Same behavior as the Iterator version: reads as
    // "for each TimerTask t in c, cancel t".
    void cancelAll(Collection<TimerTask> c) {
        for (TimerTask t : c)
            t.cancel();
    }


When you see the colon (:) read it as “in.” The loop above reads as “for each TimerTask t in c.” As you can see, the for-each construct combines beautifully with generics. It preserves all of the type safety, while removing the remaining clutter. Because you don’t have to declare the iterator, you don’t have to provide a generic declaration for it. (The compiler does this for you behind your back, but you need not concern yourself with it.)


Here is a common mistake people make when they are trying to do nested iteration over two collections:


    List suits = …;
    List ranks = …;
    List sortedDeck = new ArrayList();


    // BROKEN – throws NoSuchElementException!
    for (Iterator i = suits.iterator(); i.hasNext(); )
        for (Iterator j = ranks.iterator(); j.hasNext(); )
            sortedDeck.add(new Card(i.next(), j.next()));


Can you spot the bug? Don’t feel bad if you can’t. Many expert programmers have made this mistake at one time or another. The problem is that the next method is being called too many times on the “outer” collection (suits). It is being called in the inner loop for both the outer and inner collections, which is wrong. In order to fix it, you have to add a variable in the scope of the outer loop to hold the suit:


    // Fixed, though a bit ugly
    for (Iterator i = suits.iterator(); i.hasNext(); ) {
        Suit suit = (Suit) i.next();
        for (Iterator j = ranks.iterator(); j.hasNext(); )
            sortedDeck.add(new Card(suit, j.next()));
    }


So what does all this have to do with the for-each construct? It is tailor-made for nested iteration! Feast your eyes:


    for (Suit suit : suits)
        for (Rank rank : ranks)
            sortedDeck.add(new Card(suit, rank));


The for-each construct is also applicable to arrays, where it hides the index variable rather than the iterator. The following method returns the sum of the values in an int array:


    // Returns the sum of the elements of a
    int sum(int[] a) {
        int result = 0;
        for (int i : a)   // i is each element's value, not an index
            result += i;
        return result;
    }


So when should you use the for-each loop? Any time you can. It really beautifies your code. Unfortunately, you cannot use it everywhere. Consider, for example, the expurgate method. The program needs access to the iterator in order to remove the current element. The for-each loop hides the iterator, so you cannot call remove. Therefore, the for-each loop is not usable for filtering. Similarly it is not usable for loops where you need to replace elements in a list or array as you traverse it. Finally, it is not usable for loops that must iterate over multiple collections in parallel. These shortcomings were known by the designers, who made a conscious decision to go with a clean, simple construct that would cover the great majority of cases.

Using Enhanced For-Loops with Your Classes

The enhanced for-loop is a popular feature introduced with the Java SE platform in version 5.0. Its simple structure allows one to simplify code by presenting for-loops that visit each element of an array/collection without explicitly expressing how one goes from element to element.


Because the old style of coding didn’t become invalid with the new for-loop syntax, you don’t have to use an enhanced for-loop when visiting each element of an array/collection. However, with the new style, one’s code would typically change from something like the following:


for (int i=0; i<array.length; i++) {
    System.out.println("Element: " + array[i]);
}

to the newer form:

for (String element : array) {
    System.out.println("Element: " + element);
}


Assuming “array” is defined to be an array of String objects, each element is assigned to the element variable as it loops through the array. These basics of the enhanced for-loop were covered in an earlier Tech Tip: The Enhanced For Loop, from May 5, 2005.


If you have a class called Colony which contains a group of Penguin objects, without doing anything extra to get the enhanced for-loop to work, one way you would loop through each penguin element would be to return an Iterator and iterate through the colony. Unfortunately, the enhanced for-loop does not work with Iterator , so the following won’t even compile:


// Does not compile
import java.util.*;
public class BadColony {
  static class Penguin {
    String name;
    Penguin(String name) {
      this.name = name;
    }
    public String toString() {
      return “Penguin{” + name + “}”;
    }
  }

  Set<Penguin> set = new HashSet<Penguin>();

  public void addPenguin(Penguin p) {
    set.add(p);
  }

  public Iterator<Penguin> getPenguins() {
    return set.iterator();
  }

  public static void main(String args[]) {
    Colony colony = new Colony();
    Penguin opus = new Penguin(“Opus”);
    Penguin chilly = new Penguin(“Chilly Willy”);
    Penguin mumble = new Penguin(“Mumble”);
    Penguin emperor = new Penguin(“Emperor”);
    colony.addPenguin(opus);
    colony.addPenguin(chilly);
    colony.addPenguin(mumble);
    colony.addPenguin(emperor);
    Iterator<Penguin> it = colony.getPenguins();
// The bad line of code:
    for (Penguin p : it) {
      System.out.println(p);
    }
  }
}

You cannot just pass an Iterator into the enhanced for-loop. The 2nd line of the following will generate a compilation error:

    Iterator<Penguin> it = colony.getPenguins();
    for (Penguin p : it) {

The error:

BadColony.java:36: foreach not applicable to expression type
    for (Penguin p : it) {
                     ^
1 error

In order to be able to use your class with an enhanced for-loop, it does need an Iterator, but that Iterator must be provided via the Iterable interface:


public interface java.lang.Iterable {
    public java.util.Iterator iterator();
}

Actually, to be more correct, you can use a generic T, allowing the enhanced for-loop to avoid casting, returning the designated generic type, instead of just a plain old Object.

public interface java.lang.Iterable<T> {
    public java.util.Iterator<T> iterator();
}

It is this Iterable object which is then provided to the enhanced for-loop. By making the Colony class implement Iterable , and having its new iterator() method return the Iterator that getPenguins() provides, you’ll be able to loop through the penguins in the colony via an enhanced for-loop.


By adding the proper implements clause:

public class Colony implements Iterable<Colony.Penguin> {


You then get your enhanced for-loop for the colony:

    for (Penguin p : colony) {

Here’s the updated Colony class with the corrected code:

import java.util.*;

// Colony implements Iterable<Penguin>, so an instance can be the target
// of an enhanced for-loop: for (Penguin p : colony) { ... }
public class Colony implements Iterable<Colony.Penguin> {

  // Simple value type representing one penguin in the colony.
  static class Penguin {
    String name;
    Penguin(String name) {
      this.name = name;
    }
    @Override
    public String toString() {
      return "Penguin{" + name + "}";
    }
  }

  // HashSet: iteration order is unspecified, so it need not match
  // insertion order (as the article's sample output shows).
  Set<Penguin> set = new HashSet<Penguin>();

  public void addPenguin(Penguin p) {
    set.add(p);
  }

  public Iterator<Penguin> getPenguins() {
    return set.iterator();
  }

  // Iterable contract: supplies the Iterator that the for-each loop uses.
  @Override
  public Iterator<Penguin> iterator() {
    return getPenguins();
  }

  public static void main(String[] args) {
    Colony colony = new Colony();
    Penguin opus = new Penguin("Opus");
    Penguin chilly = new Penguin("Chilly Willy");
    Penguin mumble = new Penguin("Mumble");
    Penguin emperor = new Penguin("Emperor");
    colony.addPenguin(opus);
    colony.addPenguin(chilly);
    colony.addPenguin(mumble);
    colony.addPenguin(emperor);
    for (Penguin p : colony) {
      System.out.println(p);
    }
  }
}

Running the code produces the following output:

  > java Colony

  Penguin{Chilly Willy}
  Penguin{Mumble}
  Penguin{Opus}
  Penguin{Emperor}

Keep in mind that the individual penguins are internally kept in a Set type collection so the returned order doesn’t necessarily match the insertion order, which in this case it doesn’t.


Remember to genericize the implements clause for the class — “implements Iterable<T>” — and not just say “implements Iterable”. With the latter, the enhanced for-loop will only return an Object for each element.


For more information on the enhanced for-loop, please see the Java Programming Language guide from JDK 1.5.