Add a synthetic missed optimization.

llvm-svn: 60186
diff --git a/llvm/lib/Target/README.txt b/llvm/lib/Target/README.txt
index c4161c3..f6f24eb 100644
--- a/llvm/lib/Target/README.txt
+++ b/llvm/lib/Target/README.txt
@@ -984,3 +984,27 @@
 }
 
 //===---------------------------------------------------------------------===//
+
+These three functions all compute the same thing (an i8 value of 1 if the
+unsigned input is >= 250, and 0 otherwise), but produce different assembly.
+On x86, they are sorted from slowest to fastest.
+
+define i8 @udiv(i8 %x) readnone nounwind {
+  %A = udiv i8 %x, 250
+  ret i8 %A
+}
+
+define i8 @select(i8 %x) readnone nounwind {
+  %A = icmp ult i8 %x, 250
+  %B = select i1 %A, i8 0, i8 1
+  ret i8 %B
+}
+
+define i8 @addshr(i8 %x) readnone nounwind {
+  %A = zext i8 %x to i9
+  %B = add i9 %A, 6       ;; 256 - 250 == 6
+  %C = lshr i9 %B, 8
+  %D = trunc i9 %C to i8
+  ret i8 %D
+}
+
+//===---------------------------------------------------------------------===//