/* -*- mode: java; c-basic-offset: 2; indent-tabs-mode: nil -*- */
/*
Part of the Processing project - http://processing.org
Copyright (c) 2004-11 Ben Fry and Casey Reas
Copyright (c) 2001-04 Massachusetts Institute of Technology
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with this library; if not, write to the
Free Software Foundation, Inc., 59 Temple Place, Suite 330,
Boston, MA 02111-1307 USA
*/
package processing.core;
import java.awt.*;
import java.awt.image.*;
import java.io.*;
import java.util.Iterator;
import javax.imageio.*;
import javax.imageio.metadata.*;
/**
* ( begin auto-generated from PImage.xml )
*
* Datatype for storing images. Processing can display .gif,
* .jpg, .tga, and .png images. Images may be
* displayed in 2D and 3D space. Before an image is used, it must be loaded
* with the loadImage() function. The PImage class contains
* fields for the width and height of the image, as well as
* an array called pixels[] that contains the values for every pixel
* in the image. The methods described below allow easy access to the
* image's pixels and alpha channel and simplify the process of compositing.
* If the image is in RGB format (i.e. on a PVideo object),
* the value will get its high bits set, just to avoid cases where
* they haven't been set already.
*
* If the image is in ALPHA format, this returns a white with its
* alpha value set.
*
* This function is included primarily for beginners. It is quite
* slow because it has to check to see if the x, y that was provided
* is inside the bounds, and then has to check to see what image
* type it is. If you want things to be more efficient, access the
* pixels[] array directly.
*
* @webref image:pixels
* @brief Reads the color of any pixel or grabs a rectangle of pixels
* @usage web_application
* @param x x-coordinate of the pixel
* @param y y-coordinate of the pixel
* @see PApplet#set(int, int, int)
* @see PApplet#pixels
* @see PApplet#copy(PImage, int, int, int, int, int, int, int, int)
*/
public int get(int x, int y) {
  // Out-of-bounds reads return 0 (transparent black).
  boolean outside = x < 0 || y < 0 || x >= width || y >= height;
  if (outside) {
    return 0;
  }
  int index = y * width + x;
  if (format == RGB) {
    // RGB images may lack 0xff in the high byte; force full alpha.
    return pixels[index] | 0xff000000;
  }
  if (format == ARGB) {
    return pixels[index];
  }
  if (format == ALPHA) {
    // ALPHA images come back as white with the stored value as alpha.
    return (pixels[index] << 24) | 0xffffff;
  }
  return 0;
}
/**
* @param w width of pixel rectangle to get
* @param h height of pixel rectangle to get
*/
public PImage get(int x, int y, int w, int h) {
  // The output image keeps the requested size even when the source
  // region is clipped; uncovered areas remain transparent.
  int outW = w;
  int outH = h;
  int destX = 0;
  int destY = 0;
  boolean clipped = false;

  if (x < 0) {           // hanging off the left edge
    w += x;              // x is negative: shrink the copy width
    destX = -x;          // shift the destination right by the overhang
    x = 0;
    clipped = true;
  }
  if (y < 0) {           // hanging off the top edge
    h += y;
    destY = -y;
    y = 0;
    clipped = true;
  }
  if (x + w > width) {   // clip at the right edge
    w = width - x;
    clipped = true;
  }
  if (y + h > height) {  // clip at the bottom edge
    h = height - y;
    clipped = true;
  }
  if (w < 0) {
    w = 0;
  }
  if (h < 0) {
    h = 0;
  }

  // A clipped RGB source needs an alpha channel so the uncovered part
  // of the result can be transparent.
  int outFormat = (clipped && format == RGB) ? ARGB : format;

  PImage result = new PImage(outW, outH, outFormat);
  result.parent = parent; // parent may be null so can't use createImage()
  if (w > 0 && h > 0) {
    getImpl(x, y, w, h, result, destX, destY);
  }
  return result;
}
/**
* Returns a copy of this PImage. Equivalent to get(0, 0, width, height).
*/
public PImage get() {
  // Copy via get() rather than clone(); clone() caused memory problems.
  // http://code.google.com/p/processing/issues/detail?id=42
  return get(0, 0, width, height);
}
/**
* Internal function to actually handle getting a block of pixels that
* has already been properly cropped to a valid region. That is, x/y/w/h
* are guaranteed to be inside the image space, so the implementation can
* use the fastest possible pixel copying method.
*/
protected void getImpl(int sourceX, int sourceY,
                       int sourceWidth, int sourceHeight,
                       PImage target, int targetX, int targetY) {
  // The caller guarantees the region is inside both images, so each
  // row can be bulk-copied with System.arraycopy.
  int srcRow = sourceY * width + sourceX;
  int dstRow = targetY * target.width + targetX;
  for (int copied = 0; copied < sourceHeight; copied++) {
    System.arraycopy(pixels, srcRow, target.pixels, dstRow, sourceWidth);
    srcRow += width;
    dstRow += target.width;
  }
}
/**
* ( begin auto-generated from PImage_set.xml )
*
* Changes the color of any pixel or writes an image directly into the
* display window.
* Strictly speaking the "blue" value from the source image is
* used as the alpha color. For a fully grayscale image, this
* is correct, but for a color image it's not 100% accurate.
* For a more accurate conversion, first use filter(GRAY)
* which will make the image into a "correct" grayscale by
* performing a proper luminance-based conversion.
*
* @webref pimage:method
* @usage web_application
* @brief Masks part of an image with another image as an alpha channel
* @param img a PImage used as the alpha channel; must be the same size as the image it is applied to
*/
public void mask(PImage img) {
  // Populate the mask's pixel array, then delegate to the int[] variant.
  img.loadPixels();
  mask(img.pixels);
}
//////////////////////////////////////////////////////////////
// IMAGE FILTERS
public void filter(int kind) {
  loadPixels();

  if (kind == BLUR) {
    // Plain BLUR is equivalent to a gaussian blur of radius 1.
    filter(BLUR, 1);

  } else if (kind == GRAY) {
    if (format == ALPHA) {
      // Alpha image: promote to an opaque grayscale RGB image.
      for (int i = 0; i < pixels.length; i++) {
        int col = 255 - pixels[i];
        pixels[i] = 0xff000000 | (col << 16) | (col << 8) | col;
      }
      format = RGB;
    } else {
      // Weighted-luminance conversion, alpha channel kept intact.
      // luminance = 0.3*red + 0.59*green + 0.11*blue
      // (77, 151, 28 are those weights scaled by 256) [toxi 040115]
      for (int i = 0; i < pixels.length; i++) {
        int col = pixels[i];
        int lum = (77*(col>>16&0xff) + 151*(col>>8&0xff) + 28*(col&0xff))>>8;
        pixels[i] = (col & ALPHA_MASK) | lum<<16 | lum<<8 | lum;
      }
    }

  } else if (kind == INVERT) {
    // Flip the RGB bits, leave the alpha byte alone.
    for (int i = 0; i < pixels.length; i++) {
      pixels[i] ^= 0xffffff;
    }

  } else if (kind == POSTERIZE) {
    throw new RuntimeException("Use filter(POSTERIZE, int levels) " +
                               "instead of filter(POSTERIZE)");

  } else if (kind == OPAQUE) {
    for (int i = 0; i < pixels.length; i++) {
      pixels[i] |= 0xff000000;
    }
    format = RGB;

  } else if (kind == THRESHOLD) {
    filter(THRESHOLD, 0.5f);

  } else if (kind == ERODE) {
    // [toxi20050728] added new filters
    dilate(true);

  } else if (kind == DILATE) {
    dilate(false);
  }

  updatePixels(); // mark as modified
}
/**
* ( begin auto-generated from PImage_filter.xml )
*
* Filters an image as defined by one of the following modes:
* A useful reference for blending modes and their algorithms can be
* found in the SVG
* specification. It is important to note that Processing uses "fast" code, not
* necessarily "correct" code. No biggie, most software does. A nitpicker
* can find numerous "off by 1 division" problems in the blend code where
* >>8 or >>7 is used when strictly speaking
* /255.0 or /127.0 should have been used. For instance, exclusion (not intended for real-time use) reads
* r1 + r2 - ((2 * r1 * r2) / 255) because 255 == 1.0
* not 256 == 1.0. In other words, (255*255)>>8 is not
* the same as (255*255)/255. But for real-time use the shifts
* are preferable, and the difference is insignificant for applications
* built with Processing.
*
using the pixels[] array, be sure to use the
* loadPixels() method on the image to make sure that the pixel data
* is properly loaded.
*
create a new image, use the createImage() function. Do not
* use the syntax new PImage().
*
* ( end auto-generated )
*
* @webref image
* @usage Web & Application
* @instanceName pimg any object of type PImage
* @see PApplet#loadImage(String)
* @see PApplet#imageMode(int)
* @see PApplet#createImage(int, int, int)
*/
public class PImage implements PConstants, Cloneable {
/**
 * Format for this image, one of RGB, ARGB or ALPHA.
 * Note that RGB images still require 0xff in the high byte
 * because of how they'll be manipulated by other functions.
 */
public int format;
/**
 * Array containing the color values for all the pixels in the image,
 * stored as packed ARGB ints in row-major order. For a 100x100 image
 * there are 10,000 entries, and pixels[y*width + x] addresses the
 * pixel at (x, y).
 *
 * Before accessing this array, the data must be loaded with the
 * loadPixels() function. After the array data has been modified,
 * the updatePixels() function must be run to update the changes.
 * Without loadPixels(), running the code may (or will in future
 * releases) result in a NullPointerException.
 *
 * @webref image:pixels
 * @usage web_application
 * @brief Array containing the color of every pixel in the image
 */
public int[] pixels;
/**
 * The width of the image in units of pixels.
 *
 * @webref pimage:field
 * @usage web_application
 * @brief Image width
 */
public int width;
/**
 * The height of the image in units of pixels.
 *
 * @webref pimage:field
 * @usage web_application
 * @brief Image height
 */
public int height;
/**
 * Reference to the parent applet, used with save().
 * This prevents users from needing savePath() to use PImage.save().
 * May be null (see get(), which copies it without createImage()).
 */
public PApplet parent;
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
/** for renderers that need to store info about the image */
//protected HashMap
* Before using the pixels[] array, be sure to use the
* loadPixels() method on the image to make sure that the pixel data
* is properly loaded.
*
* To create a new image, use the createImage() function (do not use
* new PImage()).
* ( end auto-generated )
* @nowebref
* @usage web_application
* @see PApplet#loadImage(String, String)
* @see PApplet#imageMode(int)
* @see PApplet#createImage(int, int, int)
*/
public PImage() {
  // Default to ARGB images (introduced for release 0116).
  format = ARGB;
}
/**
* @nowebref
* @param width image width
* @param height image height
*/
public PImage(int width, int height) {
init(width, height, RGB);
// toxi: is it maybe better to init the image with max alpha enabled?
//for(int i=0; i
renderers may or may not seem to require loadPixels()
* or updatePixels(). However, the rule is that any time you want to
* manipulate the pixels[] array, you must first call
* loadPixels(), and after changes have been made, call
* updatePixels(). Even if the renderer may not seem to use this
* function in the current Processing release, this will always be subject
* to change.
*
* ( end auto-generated )
*
* Advanced
* Call this when you want to mess with the pixels[] array.
*
* For subclasses where the pixels[] buffer isn't set by default,
* this should copy all data into the pixels[] array
*
* @webref pimage:pixels
* @brief Loads the pixel data for the image into its pixels[] array
* @usage web_application
*/
public void loadPixels() { // ignore
  // (Re)allocate the pixel buffer when it is missing or the wrong size,
  // then flag the image as loaded.
  int count = width * height;
  if (pixels == null || pixels.length != count) {
    pixels = new int[count];
  }
  setLoaded();
}
public void updatePixels() { // ignore
  // Mark the entire image as modified.
  updatePixels(0, 0, width, height);
}
/**
* ( begin auto-generated from PImage_updatePixels.xml )
*
* Updates the image with the data in its pixels[] array. Use in
* conjunction with loadPixels(). If you're only reading pixels from
* the array, there's no need to call updatePixels().
*
renderers may or may not seem to require loadPixels()
* or updatePixels(). However, the rule is that any time you want to
* manipulate the pixels[] array, you must first call
* loadPixels(), and after changes have been made, call
* updatePixels(). Even if the renderer may not seem to use this
* function in the current Processing release, this will always be subject
* to change.
*
* Currently, none of the renderers use the additional parameters to
* updatePixels(), however this may be implemented in the future.
*
* ( end auto-generated )
* Advanced
* Mark the pixels in this region as needing an update.
* This is not currently used by any of the renderers, however the api
* is structured this way in the hope of being able to use this to
* speed things up in the future.
* @webref pimage:pixels
* @brief Updates the image with the data in its pixels[] array
* @usage web_application
* @param x x-coordinate of the upper-left corner
* @param y y-coordinate of the upper-left corner
* @param w width
* @param h height
*/
/**
 * Marks the rectangle (x, y, w, h) as needing an update, growing the
 * tracked dirty region (mx1, my1)-(mx2, my2) and clamping it to the
 * image bounds. Not currently consumed by any renderer, but the API is
 * structured this way so region updates can be optimized in the future.
 */
public void updatePixels(int x, int y, int w, int h) { // ignore
  int x2 = x + w;
  int y2 = y + h;
  if (!modified) {
    // First touch since the last flush: start a fresh dirty rect
    // clamped to [0, width] x [0, height].
    mx1 = PApplet.max(0, x);
    mx2 = PApplet.min(width, x2);
    my1 = PApplet.max(0, y);
    my2 = PApplet.min(height, y2);
    modified = true;
  } else {
    // Already dirty: expand the existing rect so it covers both the
    // old region and the newly-touched one (each corner clamped).
    if (x < mx1) mx1 = PApplet.max(0, x);
    if (x > mx2) mx2 = PApplet.min(width, x);
    if (y < my1) my1 = PApplet.max(0, y);
    if (y > my2) my2 = PApplet.min(height, y);
    if (x2 < mx1) mx1 = PApplet.max(0, x2);
    if (x2 > mx2) mx2 = PApplet.min(width, x2);
    if (y2 < my1) my1 = PApplet.max(0, y2);
    if (y2 > my2) my2 = PApplet.min(height, y2);
  }
}
//////////////////////////////////////////////////////////////
// COPYING IMAGE DATA
/**
* Duplicate an image, returns new PImage object.
* The pixels[] array for the new object will be unique
* and recopied from the source image. This is implemented as an
* override of Object.clone(). We recommend using get() instead,
* because it prevents you from needing to catch the
* CloneNotSupportedException, and from doing a cast from the result.
*/
@Override
public Object clone() throws CloneNotSupportedException { // ignore
  // Delegates to get() so the copy receives its own pixels[] array.
  return get();
}
/**
* ( begin auto-generated from PImage_resize.xml )
*
* Resize the image to a new width and height. To make the image scale
* proportionally, use 0 as the value for the wide or high
* parameter. For instance, to make the width of an image 150 pixels, and
* change the height using the same proportion, use resize(150, 0).
*
* Even though a PGraphics is technically a PImage, it is not possible to
* rescale the image data found in a PGraphics. (It's simply not possible
* to do this consistently across renderers: technically infeasible with
* P3D, or what would it even do with PDF?) If you want to resize PGraphics
* content, first get a copy of its image data using the get()
* method, and call resize() on the PImage that is returned.
*
* ( end auto-generated )
* @webref pimage:method
* @brief Changes the size of an image to a new width and height
* @usage web_application
* @param w the resized image width
* @param h the resized image height
* @see PImage#get(int, int, int, int)
*/
public void resize(int w, int h) { // ignore
  if (w <= 0 && h <= 0) {
    throw new IllegalArgumentException("width or height must be > 0 for resize");
  }

  // A zero on one axis means "derive it from the aspect ratio".
  if (w == 0) {
    float scale = (float) h / (float) height;
    w = (int) (width * scale);
  } else if (h == 0) {
    float scale = (float) w / (float) width;
    h = (int) (height * scale);
  }

  // Rescale through a native image, then adopt the result's data.
  BufferedImage scaled = shrinkImage((BufferedImage) getNative(), w, h);
  PImage temp = new PImage(scaled);
  this.width = temp.width;
  this.height = temp.height;
  this.pixels = temp.pixels;

  // Mark the freshly-adopted pixel array as altered.
  updatePixels();
}
// Adapted from getFasterScaledInstance() method from page 111 of
// "Filthy Rich Clients" by Chet Haase and Romain Guy
// Additional modifications and simplifications have been added,
// plus a fix to deal with an infinite loop if images are expanded.
// http://code.google.com/p/processing/issues/detail?id=1463
/**
 * Multi-step bilinear rescale of img to exactly targetWidth x targetHeight.
 * Repeatedly halves the dimensions (never undershooting the target) so
 * downscaling keeps more detail than a single drawImage() call would.
 * Translucent images get a fresh scratch buffer every pass so stale alpha
 * from a previous iteration can't bleed into the next one.
 */
static private BufferedImage shrinkImage(BufferedImage img,
                                         int targetWidth, int targetHeight) {
  int type = (img.getTransparency() == Transparency.OPAQUE) ?
    BufferedImage.TYPE_INT_RGB : BufferedImage.TYPE_INT_ARGB;
  BufferedImage outgoing = img;
  BufferedImage scratchImage = null;
  Graphics2D g2 = null;
  int prevW = outgoing.getWidth();
  int prevH = outgoing.getHeight();
  boolean isTranslucent = img.getTransparency() != Transparency.OPAQUE;

  // Use multi-step technique: start with original size, then scale down in
  // multiple passes with drawImage() until the target size is reached.
  int w = img.getWidth();
  int h = img.getHeight();
  do {
    if (w > targetWidth) {
      w /= 2;
      // if this is the last step, do the exact size
      if (w < targetWidth) {
        w = targetWidth;
      }
    } else if (targetWidth >= w) {
      // enlarging (or already equal): jump straight to the target width;
      // this also prevents an infinite loop when images are expanded
      w = targetWidth;
    }
    if (h > targetHeight) {
      h /= 2;
      if (h < targetHeight) {
        h = targetHeight;
      }
    } else if (targetHeight >= h) {
      h = targetHeight;
    }
    if (scratchImage == null || isTranslucent) {
      // Opaque images reuse a single scratch buffer for all iterations
      // and copy to the final, correctly-sized image before returning.
      scratchImage = new BufferedImage(w, h, type);
      g2 = scratchImage.createGraphics();
    }
    g2.setRenderingHint(RenderingHints.KEY_INTERPOLATION,
                        RenderingHints.VALUE_INTERPOLATION_BILINEAR);
    g2.drawImage(outgoing, 0, 0, w, h, 0, 0, prevW, prevH, null);
    prevW = w;
    prevH = h;
    outgoing = scratchImage;
  } while (w != targetWidth || h != targetHeight);

  if (g2 != null) {
    g2.dispose();
  }

  // If we used a scratch buffer that is larger than our target size,
  // create an image of the right size and copy the results into it.
  if (targetWidth != outgoing.getWidth() ||
      targetHeight != outgoing.getHeight()) {
    scratchImage = new BufferedImage(targetWidth, targetHeight, type);
    g2 = scratchImage.createGraphics();
    g2.drawImage(outgoing, 0, 0, null);
    g2.dispose();
    outgoing = scratchImage;
  }
  return outgoing;
}
//////////////////////////////////////////////////////////////
// MARKING IMAGE AS LOADED / FOR USE IN RENDERERS
/** Whether the pixel data has been loaded (see loadPixels()). */
public boolean isLoaded() { // ignore
  return loaded;
}

/** Marks the pixel data as loaded. */
public void setLoaded() { // ignore
  loaded = true;
}

/** Sets the loaded flag explicitly. */
public void setLoaded(boolean l) { // ignore
  loaded = l;
}
//////////////////////////////////////////////////////////////
// GET/SET PIXELS
/**
* ( begin auto-generated from PImage_get.xml )
*
* Reads the color of any pixel or grabs a section of an image. If no
* parameters are specified, the entire image is returned. Use the x
* and y parameters to get the value of one pixel. Get a section of
* the display window by specifying an additional width and
* height parameter. When getting an image, the x and
* y parameters define the coordinates for the upper-left corner of
* the image, regardless of the current imageMode().
*
* If the pixel requested is outside of the image window, black is
* returned. The numbers returned are scaled according to the current color
* ranges, but only RGB values are returned by this function. For example,
* even though you may have drawn a shape with colorMode(HSB), the
* numbers returned will be in RGB format.
*
* Getting the color of a single pixel with get(x, y) is easy, but
* not as fast as grabbing the data directly from pixels[]. The
* equivalent statement to get(x, y) using pixels[] is
* pixels[y*width+x]. See the reference for pixels[] for more information.
*
* ( end auto-generated )
*
* Advanced
* Returns an ARGB "color" type (a packed 32 bit int with the color.
* If the coordinate is outside the image, zero is returned
* (black, but completely transparent).
*
*
* The x and y parameters specify the pixel to change and the
* color parameter specifies the color value. The color parameter is
* affected by the current color mode (the default is RGB values from 0 to
* 255). When setting an image, the x and y parameters define
* the coordinates for the upper-left corner of the image, regardless of
* the current imageMode().
*
* Setting the color of a single pixel with set(x, y) is easy, but
* not as fast as putting the data directly into pixels[]. The
* equivalent statement to set(x, y, #000000) using pixels[]
* is pixels[y*width+x] = #000000. See the reference for
* pixels[] for more information.
*
* ( end auto-generated )
*
* @webref image:pixels
* @brief writes a color to any pixel or writes an image into another
* @usage web_application
* @param x x-coordinate of the pixel
* @param y y-coordinate of the pixel
* @param c any value of the color datatype
* @see PImage#get(int, int, int, int)
* @see PImage#pixels
* @see PImage#copy(PImage, int, int, int, int, int, int, int, int)
*/
public void set(int x, int y, int c) {
  // Writes outside the image are silently dropped.
  boolean outside = x < 0 || y < 0 || x >= width || y >= height;
  if (outside) {
    return;
  }
  pixels[y * width + x] = c;
  // Mark just this pixel as dirty (slow?).
  updatePixels(x, y, 1, 1);
}
/**
* Advanced
* Efficient method of drawing an image's pixels directly to this surface.
* No variations are employed, meaning that any scale, tint, or imageMode
* settings will be ignored.
*
* @param img image to copy into the original image
*/
public void set(int x, int y, PImage img) {
  // Clip the source rectangle so the copy stays inside this image.
  int srcX = 0;
  int srcY = 0;
  int copyW = img.width;
  int copyH = img.height;

  if (x < 0) {              // hanging off the left edge
    srcX -= x;
    copyW += x;
    x = 0;
  }
  if (y < 0) {              // hanging off the top edge
    srcY -= y;
    copyH += y;
    y = 0;
  }
  if (x + copyW > width) {  // hanging off the right edge
    copyW = width - x;
  }
  if (y + copyH > height) { // hanging off the bottom edge
    copyH = height - y;
  }

  // Entirely outside this image: nothing to copy.
  if (copyW <= 0 || copyH <= 0) return;

  setImpl(img, srcX, srcY, copyW, copyH, x, y);
}
/**
* Internal function to actually handle setting a block of pixels that
* has already been properly cropped from the image to a valid region.
*/
protected void setImpl(PImage sourceImage,
                       int sourceX, int sourceY,
                       int sourceWidth, int sourceHeight,
                       int targetX, int targetY) {
  // Region is pre-clipped by the caller, so rows can be bulk-copied.
  int srcPos = sourceY * sourceImage.width + sourceX;
  int dstPos = targetY * width + targetX;
  for (int row = 0; row < sourceHeight; row++) {
    System.arraycopy(sourceImage.pixels, srcPos, pixels, dstPos, sourceWidth);
    srcPos += sourceImage.width;
    dstPos += width;
  }
  updatePixels(targetX, targetY, sourceWidth, sourceHeight);
}
//////////////////////////////////////////////////////////////
// ALPHA CHANNEL
@Deprecated
public void mask(int maskArray[]) { // ignore
  loadPixels();
  // Refuse masks whose size doesn't match this image.
  if (maskArray.length != pixels.length) {
    throw new RuntimeException("mask() can only be used with an image that's the same size.");
  }
  // The low byte of each mask entry becomes this image's alpha channel.
  int count = pixels.length;
  for (int i = 0; i < count; i++) {
    pixels[i] = ((maskArray[i] & 0xff) << 24) | (pixels[i] & 0xffffff);
  }
  format = ARGB;
  updatePixels();
}
/**
* ( begin auto-generated from PImage_mask.xml )
*
* Masks part of an image from displaying by loading another image and
* using it as an alpha channel. This mask image should only contain
* grayscale data, but only the blue color channel is used. The mask image
* needs to be the same size as the image to which it is applied.
*
* In addition to using a mask image, an integer array containing the alpha
* channel data can be specified directly. This method is useful for
* creating dynamically generated alpha masks. This array must be of the
* same length as the target image's pixels array and should contain only
* grayscale data of values between 0-255.
*
* ( end auto-generated )
*
* Advanced
*
* Set alpha channel for an image. Black colors in the source
* image will make the destination image completely transparent,
* and white will make things fully opaque. Gray values will
* be in-between steps.
*
THRESHOLD - converts the image to black and white pixels depending if
* they are above or below the threshold defined by the level parameter.
* The level must be between 0.0 (black) and 1.0(white). If no level is
* specified, 0.5 is used.
*
* GRAY - converts any colors in the image to grayscale equivalents
*
* INVERT - sets each pixel to its inverse value
*
* POSTERIZE - limits each channel of the image to the number of colors
* specified as the level parameter
*
* BLUR - executes a Gaussian blur with the level parameter specifying the
* extent of the blurring. If no level parameter is used, the blur is
* equivalent to Gaussian blur of radius 1
*
* OPAQUE - sets the alpha channel to entirely opaque
*
* ERODE - reduces the light areas with the amount defined by the level
* parameter
*
* DILATE - increases the light areas with the amount defined by the level parameter
*
* ( end auto-generated )
*
* Advanced
* Method to apply a variety of basic filters to this image.
*
*
* Luminance conversion code contributed by
* toxi
*
* Gaussian blur code contributed by
* Mario Klingemann
*
* @webref image:pixels
* @brief Converts the image to grayscale or black and white
* @usage web_application
* @param kind Either THRESHOLD, GRAY, OPAQUE, INVERT, POSTERIZE, BLUR, ERODE, or DILATE
* @param param unique for each, see above
*/
public void filter(int kind, float param) {
  loadPixels();

  switch (kind) {
    case BLUR:
      // Dispatch to the gaussian implementation for this pixel format.
      if (format == ALPHA) {
        blurAlpha(param);
      } else if (format == ARGB) {
        blurARGB(param);
      } else {
        blurRGB(param);
      }
      break;

    // These filters take no parameter; steer callers to the 1-arg form.
    case GRAY:
      throw new RuntimeException("Use filter(GRAY) instead of " +
                                 "filter(GRAY, param)");
    case INVERT:
      throw new RuntimeException("Use filter(INVERT) instead of " +
                                 "filter(INVERT, param)");
    case OPAQUE:
      throw new RuntimeException("Use filter(OPAQUE) instead of " +
                                 "filter(OPAQUE, param)");
    case ERODE:
      throw new RuntimeException("Use filter(ERODE) instead of " +
                                 "filter(ERODE, param)");
    case DILATE:
      throw new RuntimeException("Use filter(DILATE) instead of " +
                                 "filter(DILATE, param)");

    case POSTERIZE: {
      int levels = (int) param;
      if ((levels < 2) || (levels > 255)) {
        throw new RuntimeException("Levels must be between 2 and 255 for " +
                                   "filter(POSTERIZE, levels)");
      }
      // Quantize each channel to 'levels' steps, rescaled back to 0..255.
      int levels1 = levels - 1;
      for (int i = 0; i < pixels.length; i++) {
        int rlevel = (pixels[i] >> 16) & 0xff;
        int glevel = (pixels[i] >> 8) & 0xff;
        int blevel = pixels[i] & 0xff;
        rlevel = (((rlevel * levels) >> 8) * 255) / levels1;
        glevel = (((glevel * levels) >> 8) * 255) / levels1;
        blevel = (((blevel * levels) >> 8) * 255) / levels1;
        pixels[i] = ((0xff000000 & pixels[i]) |
                     (rlevel << 16) |
                     (glevel << 8) |
                     blevel);
      }
      break;
    }

    case THRESHOLD: {
      // Pixels whose brightest channel is >= the threshold become white,
      // the rest become black; alpha is preserved.
      int thresh = (int) (param * 255);
      for (int i = 0; i < pixels.length; i++) {
        int max = Math.max((pixels[i] & RED_MASK) >> 16,
                           Math.max((pixels[i] & GREEN_MASK) >> 8,
                                    (pixels[i] & BLUE_MASK)));
        pixels[i] = (pixels[i] & ALPHA_MASK) |
                    ((max < thresh) ? 0x000000 : 0xffffff);
      }
      break;
    }
  }
  updatePixels(); // mark as modified
}
/**
* Optimized code for building the blur kernel.
* further optimized blur code (approx. 15% for radius=20)
* bigger speed gains for larger radii (~30%)
* added support for various image types (ALPHA, RGB, ARGB)
* [toxi 050728]
*/
/**
 * Builds (and caches) the 1-D kernel used by the separable gaussian
 * blur filters. The tap at distance d from the center has weight
 * (radius - d)^2, and blurMult[i][v] precomputes weight * v for every
 * possible 8-bit channel value v. Rebuilds only when the radius changes.
 *
 * @param r blur radius as passed to filter(BLUR, r); the integer kernel
 *          radius is (int)(r * 3.5), clamped to [1, 248]
 */
protected void buildBlurKernel(float r) {
  int radius = (int) (r * 3.5f);
  radius = (radius < 1) ? 1 : ((radius < 248) ? radius : 248);
  if (blurRadius != radius) {
    blurRadius = radius;
    // FIX: the old expression "1 + blurRadius<<1" parsed as
    // (1 + blurRadius) << 1 because '+' binds tighter than '<<',
    // allocating one extra kernel slot that always held weight 0.
    // A kernel of radius r has exactly 2*r + 1 taps; the extra
    // zero-weight tap never changed the output, so this is safe.
    blurKernelSize = (blurRadius << 1) + 1;
    blurKernel = new int[blurKernelSize];
    blurMult = new int[blurKernelSize][256];

    int bk, bki;
    int[] bm, bmi;

    // Fill both symmetric halves of the kernel at once.
    for (int i = 1, radiusi = radius - 1; i < radius; i++) {
      blurKernel[radius+i] = blurKernel[radiusi] = bki = radiusi * radiusi;
      bm = blurMult[radius+i];
      bmi = blurMult[radiusi--];
      for (int j = 0; j < 256; j++) {
        bm[j] = bmi[j] = bki * j;
      }
    }
    // Center tap.
    bk = blurKernel[radius] = radius * radius;
    bm = blurMult[radius];
    for (int j = 0; j < 256; j++) {
      bm[j] = bk * j;
    }
  }
}
/**
 * Gaussian blur for ALPHA-format images (single channel in the low byte).
 * Separable implementation: a horizontal pass writes into b2[], then a
 * vertical pass writes the result back into pixels[]. At the image edges
 * the kernel is clipped and the sum renormalized, so edge pixels are
 * averaged only over in-bounds taps. [toxi 050728]
 */
protected void blurAlpha(float r) {
  int sum, cb;
  int read, ri, ym, ymi, bk0;
  int b2[] = new int[pixels.length];
  int yi = 0;

  buildBlurKernel(r);

  // Horizontal pass: weighted sum over each row neighborhood.
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      cb = sum = 0;
      read = x - blurRadius;    // leftmost tap for this pixel
      if (read<0) {
        bk0=-read;              // skip taps that fall off the left edge
        read=0;
      } else {
        if (read >= width)
          break;
        bk0=0;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (read >= width)      // stop at the right edge
          break;
        int c = pixels[read + yi];
        int[] bm=blurMult[i];
        cb += bm[c & BLUE_MASK];  // precomputed weight * channel value
        sum += blurKernel[i];     // accumulate only the weights used
        read++;
      }
      ri = yi + x;
      b2[ri] = cb / sum;          // normalize by the clipped kernel sum
    }
    yi += width;
  }

  // Vertical pass over the intermediate buffer.
  yi = 0;
  ym=-blurRadius;                 // top tap row for the current y
  ymi=ym*width;                   // same, as a pixel-array offset
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      cb = sum = 0;
      if (ym<0) {
        bk0 = ri = -ym;           // clip taps above the top edge
        read = x;
      } else {
        if (ym >= height)
          break;
        bk0 = 0;
        ri = ym;
        read = x + ymi;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (ri >= height)         // stop at the bottom edge
          break;
        int[] bm=blurMult[i];
        cb += bm[b2[read]];
        sum += blurKernel[i];
        ri++;
        read += width;            // step one row down
      }
      pixels[x+yi] = (cb/sum);
    }
    yi += width;
    ymi += width;
    ym++;
  }
}
/**
 * Gaussian blur for RGB-format images. Same separable two-pass scheme as
 * blurAlpha(), but tracks red/green/blue independently through the
 * intermediate buffers r2/g2/b2, and forces the alpha byte of every
 * output pixel to 0xff. Edge taps are clipped and renormalized.
 */
protected void blurRGB(float r) {
  int sum, cr, cg, cb;
  int read, ri, ym, ymi, bk0;
  int r2[] = new int[pixels.length];
  int g2[] = new int[pixels.length];
  int b2[] = new int[pixels.length];
  int yi = 0;

  buildBlurKernel(r);

  // Horizontal pass: weighted per-channel sums along each row.
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      cb = cg = cr = sum = 0;
      read = x - blurRadius;    // leftmost tap for this pixel
      if (read<0) {
        bk0=-read;              // skip taps off the left edge
        read=0;
      } else {
        if (read >= width)
          break;
        bk0=0;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (read >= width)      // stop at the right edge
          break;
        int c = pixels[read + yi];
        int[] bm=blurMult[i];
        cr += bm[(c & RED_MASK) >> 16];
        cg += bm[(c & GREEN_MASK) >> 8];
        cb += bm[c & BLUE_MASK];
        sum += blurKernel[i];   // accumulate only the weights used
        read++;
      }
      ri = yi + x;
      r2[ri] = cr / sum;        // normalize by the clipped kernel sum
      g2[ri] = cg / sum;
      b2[ri] = cb / sum;
    }
    yi += width;
  }

  // Vertical pass over the intermediate channel buffers.
  yi = 0;
  ym=-blurRadius;               // top tap row for the current y
  ymi=ym*width;                 // same, as a pixel-array offset
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      cb = cg = cr = sum = 0;
      if (ym<0) {
        bk0 = ri = -ym;         // clip taps above the top edge
        read = x;
      } else {
        if (ym >= height)
          break;
        bk0 = 0;
        ri = ym;
        read = x + ymi;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (ri >= height)       // stop at the bottom edge
          break;
        int[] bm=blurMult[i];
        cr += bm[r2[read]];
        cg += bm[g2[read]];
        cb += bm[b2[read]];
        sum += blurKernel[i];
        ri++;
        read += width;          // step one row down
      }
      // Recombine channels with a fully-opaque alpha byte.
      pixels[x+yi] = 0xff000000 | (cr/sum)<<16 | (cg/sum)<<8 | (cb/sum);
    }
    yi += width;
    ymi += width;
    ym++;
  }
}
/**
 * Gaussian blur for ARGB-format images. Identical structure to blurRGB()
 * but also blurs the alpha channel through its own intermediate buffer
 * (a2), so transparency is softened along with the color channels.
 * Edge taps are clipped and the weight sum renormalized.
 */
protected void blurARGB(float r) {
  int sum, cr, cg, cb, ca;
  int read, ri, ym, ymi, bk0;
  int wh = pixels.length;
  int r2[] = new int[wh];
  int g2[] = new int[wh];
  int b2[] = new int[wh];
  int a2[] = new int[wh];
  int yi = 0;

  buildBlurKernel(r);

  // Horizontal pass: weighted per-channel sums along each row.
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      cb = cg = cr = ca = sum = 0;
      read = x - blurRadius;    // leftmost tap for this pixel
      if (read<0) {
        bk0=-read;              // skip taps off the left edge
        read=0;
      } else {
        if (read >= width)
          break;
        bk0=0;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (read >= width)      // stop at the right edge
          break;
        int c = pixels[read + yi];
        int[] bm=blurMult[i];
        ca += bm[(c & ALPHA_MASK) >>> 24];  // unsigned shift for alpha
        cr += bm[(c & RED_MASK) >> 16];
        cg += bm[(c & GREEN_MASK) >> 8];
        cb += bm[c & BLUE_MASK];
        sum += blurKernel[i];   // accumulate only the weights used
        read++;
      }
      ri = yi + x;
      a2[ri] = ca / sum;        // normalize by the clipped kernel sum
      r2[ri] = cr / sum;
      g2[ri] = cg / sum;
      b2[ri] = cb / sum;
    }
    yi += width;
  }

  // Vertical pass over the intermediate channel buffers.
  yi = 0;
  ym=-blurRadius;               // top tap row for the current y
  ymi=ym*width;                 // same, as a pixel-array offset
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      cb = cg = cr = ca = sum = 0;
      if (ym<0) {
        bk0 = ri = -ym;         // clip taps above the top edge
        read = x;
      } else {
        if (ym >= height)
          break;
        bk0 = 0;
        ri = ym;
        read = x + ymi;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (ri >= height)       // stop at the bottom edge
          break;
        int[] bm=blurMult[i];
        ca += bm[a2[read]];
        cr += bm[r2[read]];
        cg += bm[g2[read]];
        cb += bm[b2[read]];
        sum += blurKernel[i];
        ri++;
        read += width;          // step one row down
      }
      // Recombine all four channels, including the blurred alpha.
      pixels[x+yi] = (ca/sum)<<24 | (cr/sum)<<16 | (cg/sum)<<8 | (cb/sum);
    }
    yi += width;
    ymi += width;
    ym++;
  }
}
/**
* Generic dilate/erode filter using luminance values
* as decision factor. [toxi 050728]
*/
protected void dilate(boolean isInverted) {
int currIdx=0;
int maxIdx=pixels.length;
int[] out=new int[maxIdx];
if (!isInverted) {
// erosion (grow light areas)
while (currIdx
* As of release 0149, this function ignores imageMode().
*
* ( end auto-generated )
*
* @webref image:pixels
* @brief Copies the entire image
* @usage web_application
* @param sx X coordinate of the source's upper left corner
* @param sy Y coordinate of the source's upper left corner
* @param sw source image width
* @param sh source image height
* @param dx X coordinate of the destination's upper left corner
* @param dy Y coordinate of the destination's upper left corner
* @param dw destination image width
* @param dh destination image height
* @see PGraphics#alpha(int)
* @see PImage#blend(PImage, int, int, int, int, int, int, int, int, int)
*/
public void copy(int sx, int sy, int sw, int sh,
                 int dx, int dy, int dw, int dh) {
  // Self-copy: delegate to blend() in REPLACE mode, which copies (and
  // resizes when sw/sh differ from dw/dh) without blending arithmetic.
  blend(this, sx, sy, sw, sh, dx, dy, dw, dh, REPLACE);
}
/**
* @param src an image variable referring to the source image.
*/
public void copy(PImage src,
                 int sx, int sy, int sw, int sh,
                 int dx, int dy, int dw, int dh) {
  // Copy from another image: same as the no-src overload, but reading
  // from src. REPLACE mode performs a plain (resizing) pixel copy.
  blend(src, sx, sy, sw, sh, dx, dy, dw, dh, REPLACE);
}
//////////////////////////////////////////////////////////////
// BLEND
/**
* ( begin auto-generated from blendColor.xml )
*
* Blends two color values together based on the blending mode given as the
* MODE parameter. The possible modes are described in the reference
* for the blend() function.
*
* ( end auto-generated )
* Advanced
*
*
*
*
* BLEND - linear interpolation of colours: C = A*factor + B
*
* ADD - additive blending with white clip: C = min(A*factor + B, 255)
*
* SUBTRACT - subtractive blending with black clip: C = max(B - A*factor,
* 0)
*
* DARKEST - only the darkest colour succeeds: C = min(A*factor, B)
*
* LIGHTEST - only the lightest colour succeeds: C = max(A*factor, B)
*
* DIFFERENCE - subtract colors from underlying image.
*
* EXCLUSION - similar to DIFFERENCE, but less extreme.
*
* MULTIPLY - Multiply the colors, result will always be darker.
*
* SCREEN - Opposite multiply, uses inverse values of the colors.
*
* OVERLAY - A mix of MULTIPLY and SCREEN. Multiplies dark values,
* and screens light values.
*
* HARD_LIGHT - SCREEN when greater than 50% gray, MULTIPLY when lower.
*
* SOFT_LIGHT - Mix of DARKEST and LIGHTEST.
* Works like OVERLAY, but not as harsh.
*
* DODGE - Lightens light tones and increases contrast, ignores darks.
* Called "Color Dodge" in Illustrator and Photoshop.
*
* BURN - Darker areas are applied, increasing contrast, ignores lights.
* Called "Color Burn" in Illustrator and Photoshop.
*
* All modes use the alpha information (highest byte) of source image
* pixels as the blending factor. If the source and destination regions are
* different sizes, the image will be automatically resized to match the
* destination size. If the srcImg parameter is not used, the
* display window is used as the source image.
*
* As of release 0149, this function ignores imageMode().
*
* ( end auto-generated )
*
* @webref image:pixels
* @brief Copies a pixel or rectangle of pixels using different blending modes
* @param src an image variable referring to the source image
* @param sx X coordinate of the source's upper left corner
* @param sy Y coordinate of the source's upper left corner
* @param sw source image width
* @param sh source image height
* @param dx X coordinate of the destination's upper left corner
* @param dy Y coordinate of the destination's upper left corner
* @param dw destination image width
* @param dh destination image height
* @param mode Either BLEND, ADD, SUBTRACT, LIGHTEST, DARKEST, DIFFERENCE, EXCLUSION, MULTIPLY, SCREEN, OVERLAY, HARD_LIGHT, SOFT_LIGHT, DODGE, BURN
*
* @see PApplet#alpha(int)
* @see PImage#copy(PImage, int, int, int, int, int, int, int, int)
* @see PImage#blendColor(int,int,int)
*/
public void blend(PImage src,
                  int sx, int sy, int sw, int sh,
                  int dx, int dy, int dw, int dh, int mode) {
  /*
  if (imageMode == CORNER) { // if CORNERS, do nothing
  sx2 += sx1;
  sy2 += sy1;
  dx2 += dx1;
  dy2 += dy1;
  } else if (imageMode == CENTER) {
  sx1 -= sx2 / 2f;
  sy1 -= sy2 / 2f;
  sx2 += sx1;
  sy2 += sy1;
  dx1 -= dx2 / 2f;
  dy1 -= dy2 / 2f;
  dx2 += dx1;
  dy2 += dy1;
  }
  */
  // Convert width/height to exclusive right/bottom edges for blit_resize().
  int sx2 = sx + sw;
  int sy2 = sy + sh;
  int dx2 = dx + dw;
  int dy2 = dy + dh;
  loadPixels();
  if (src == this) {
    if (intersect(sx, sy, sx2, sy2, dx, dy, dx2, dy2)) {
      // Source and destination rectangles overlap in the same image:
      // snapshot the source region first via get() so reads aren't
      // clobbered by writes mid-blit.
      blit_resize(get(sx, sy, sx2 - sx, sy2 - sy),
                  0, 0, sx2 - sx - 1, sy2 - sy - 1,
                  pixels, width, height, dx, dy, dx2, dy2, mode);
    } else {
      // same as below, except skip the loadPixels() because it'd be redundant
      blit_resize(src, sx, sy, sx2, sy2,
                  pixels, width, height, dx, dy, dx2, dy2, mode);
    }
  } else {
    src.loadPixels();
    blit_resize(src, sx, sy, sx2, sy2,
                pixels, width, height, dx, dy, dx2, dy2, mode);
    //src.updatePixels();
  }
  updatePixels();
}
/**
* Check to see if two rectangles intersect one another
*/
/**
 * Check to see if two rectangles intersect one another.
 * Coordinates are inclusive corners; each destination extent is clipped
 * against the source span on its axis, and the rectangles overlap iff
 * both clipped extents remain positive.
 */
private boolean intersect(int sx1, int sy1, int sx2, int sy2,
                          int dx1, int dy1, int dx2, int dy2) {
  int srcW = sx2 - sx1 + 1;
  int srcH = sy2 - sy1 + 1;
  int overlapW = dx2 - dx1 + 1;
  int overlapH = dy2 - dy1 + 1;

  // Clip horizontally.
  if (dx1 < sx1) {
    overlapW += dx1 - sx1;
    if (overlapW > srcW) {
      overlapW = srcW;
    }
  } else {
    int w = srcW + sx1 - dx1;
    if (overlapW > w) {
      overlapW = w;
    }
  }

  // Clip vertically.
  if (dy1 < sy1) {
    overlapH += dy1 - sy1;
    if (overlapH > srcH) {
      overlapH = srcH;
    }
  } else {
    int h = srcH + sy1 - dy1;
    if (overlapH > h) {
      overlapH = h;
    }
  }

  return overlapW > 0 && overlapH > 0;
}
//////////////////////////////////////////////////////////////
/**
* Internal blitter/resizer/copier from toxi.
* Uses bilinear filtering if smooth() has been enabled
* 'mode' determines the blending mode used in the process.
*/
/**
 * Internal blitter/resizer/copier from toxi.
 * Uses bilinear filtering if smooth() has been enabled;
 * 'mode' determines the blending mode used in the process.
 *
 * Clips source and destination rectangles, sets up the fixed-point
 * stepping state (srcXOffset/srcYOffset, dx/dy increments, PRECISIONF
 * scale), then runs one tight per-mode loop. The per-mode switch arms
 * are deliberately duplicated rather than dispatching per pixel — this
 * is the hot path.
 *
 * NOTE: 'smooth' is hardcoded to true below, so the nearest-neighbour
 * branch in the else-half is currently dead code (kept for reference).
 */
private void blit_resize(PImage img,
                         int srcX1, int srcY1, int srcX2, int srcY2,
                         int[] destPixels, int screenW, int screenH,
                         int destX1, int destY1, int destX2, int destY2,
                         int mode) {
  // clamp source rect to the source image bounds
  if (srcX1 < 0) srcX1 = 0;
  if (srcY1 < 0) srcY1 = 0;
  if (srcX2 > img.width) srcX2 = img.width;
  if (srcY2 > img.height) srcY2 = img.height;

  int srcW = srcX2 - srcX1;
  int srcH = srcY2 - srcY1;
  int destW = destX2 - destX1;
  int destH = destY2 - destY1;

  boolean smooth = true;  // may as well go with the smoothing these days
  if (!smooth) {
    srcW++; srcH++;
  }

  // nothing visible to do — bail out early
  if (destW <= 0 || destH <= 0 ||
      srcW <= 0 || srcH <= 0 ||
      destX1 >= screenW || destY1 >= screenH ||
      srcX1 >= img.width || srcY1 >= img.height) {
    return;
  }

  // fixed-point step per destination pixel (PRECISIONF = 1 in fixed point)
  int dx = (int) (srcW / (float) destW * PRECISIONF);
  int dy = (int) (srcH / (float) destH * PRECISIONF);

  // starting source offsets, adjusted if the dest rect starts off-screen
  srcXOffset = destX1 < 0 ? -destX1 * dx : srcX1 * PRECISIONF;
  srcYOffset = destY1 < 0 ? -destY1 * dy : srcY1 * PRECISIONF;

  if (destX1 < 0) {
    destW += destX1;
    destX1 = 0;
  }
  if (destY1 < 0) {
    destH += destY1;
    destY1 = 0;
  }

  // clip destination extent to the screen
  destW = low(destW, screenW - destX1);
  destH = low(destH, screenH - destY1);

  int destOffset = destY1 * screenW + destX1;
  srcBuffer = img.pixels;

  if (smooth) {
    // use bilinear filtering
    iw = img.width;
    iw1 = img.width - 1;
    ih1 = img.height - 1;

    // One loop per blend mode; each samples via filter_bilinear() and
    // advances the fixed-point cursor sX by dx per column, srcYOffset
    // by dy per row (filter_new_scanline() resets sX and caches row state).
    switch (mode) {

    case BLEND:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          // davbol - renamed old blend_multiply to blend_blend
          destPixels[destOffset + x] =
            blend_blend(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case ADD:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_add_pin(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case SUBTRACT:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_sub_pin(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case LIGHTEST:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_lightest(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case DARKEST:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_darkest(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case REPLACE:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] = filter_bilinear();
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case DIFFERENCE:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_difference(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case EXCLUSION:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_exclusion(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case MULTIPLY:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_multiply(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case SCREEN:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_screen(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case OVERLAY:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_overlay(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case HARD_LIGHT:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_hard_light(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case SOFT_LIGHT:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_soft_light(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    // davbol - proposed 2007-01-09
    case DODGE:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_dodge(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case BURN:
      for (int y = 0; y < destH; y++) {
        filter_new_scanline();
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_burn(destPixels[destOffset + x], filter_bilinear());
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;
    }

  } else {
    // nearest neighbour scaling (++fast!)
    // (Currently unreachable: smooth is hardcoded true above.)
    switch (mode) {

    case BLEND:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          // davbol - renamed old blend_multiply to blend_blend
          destPixels[destOffset + x] =
            blend_blend(destPixels[destOffset + x],
                        srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case ADD:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_add_pin(destPixels[destOffset + x],
                          srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case SUBTRACT:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_sub_pin(destPixels[destOffset + x],
                          srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case LIGHTEST:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_lightest(destPixels[destOffset + x],
                           srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case DARKEST:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_darkest(destPixels[destOffset + x],
                          srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case REPLACE:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] = srcBuffer[sY + (sX >> PRECISIONB)];
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case DIFFERENCE:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_difference(destPixels[destOffset + x],
                             srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case EXCLUSION:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_exclusion(destPixels[destOffset + x],
                            srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case MULTIPLY:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_multiply(destPixels[destOffset + x],
                           srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case SCREEN:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_screen(destPixels[destOffset + x],
                         srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case OVERLAY:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_overlay(destPixels[destOffset + x],
                          srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case HARD_LIGHT:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_hard_light(destPixels[destOffset + x],
                             srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case SOFT_LIGHT:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_soft_light(destPixels[destOffset + x],
                             srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    // davbol - proposed 2007-01-09
    case DODGE:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_dodge(destPixels[destOffset + x],
                        srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;

    case BURN:
      for (int y = 0; y < destH; y++) {
        sX = srcXOffset;
        sY = (srcYOffset >> PRECISIONB) * img.width;
        for (int x = 0; x < destW; x++) {
          destPixels[destOffset + x] =
            blend_burn(destPixels[destOffset + x],
                       srcBuffer[sY + (sX >> PRECISIONB)]);
          sX += dx;
        }
        destOffset += screenW;
        srcYOffset += dy;
      }
      break;
    }
  }
}
/**
 * Prepare the bilinear-filter field state for a new destination row:
 * resets the fixed-point x cursor and caches the vertical interpolation
 * fraction plus the two source-scanline offsets used by filter_bilinear().
 */
private void filter_new_scanline() {
  sX = srcXOffset;                 // restart x cursor at the row's start
  fracV = srcYOffset & PREC_MAXVAL;   // fractional y (fixed-point remainder)
  ifV = PREC_MAXVAL - fracV;          // inverse fraction
  v1 = (srcYOffset >> PRECISIONB) * iw;             // upper scanline offset
  v2 = low((srcYOffset >> PRECISIONB) + 1, ih1) * iw; // lower scanline, clamped to last row
}
/**
 * Sample one bilinearly interpolated ARGB pixel at the current
 * fixed-point cursor (sX plus the row state set by filter_new_scanline()).
 * Weights ul/ll/ur/lr are the four corner contributions in fixed point;
 * each channel is interpolated separately and repacked.
 */
private int filter_bilinear() {
  fracU = sX & PREC_MAXVAL;        // fractional x
  ifU = PREC_MAXVAL - fracU;       // inverse fraction
  // corner weights (sum to PREC_MAXVAL+1 in fixed point)
  ul = (ifU * ifV) >> PRECISIONB;
  ll = (ifU * fracV) >> PRECISIONB;
  ur = (fracU * ifV) >> PRECISIONB;
  lr = (fracU * fracV) >> PRECISIONB;
  u1 = (sX >> PRECISIONB);
  u2 = low(u1 + 1, iw1);           // clamp right neighbour to last column

  // get color values of the 4 neighbouring texels
  cUL = srcBuffer[v1 + u1];
  cUR = srcBuffer[v1 + u2];
  cLL = srcBuffer[v2 + u1];
  cLR = srcBuffer[v2 + u2];

  // red stays shifted high; green/blue are masked in place to avoid
  // extra shifts; alpha is interpolated on its 8-bit value
  r = ((ul*((cUL&RED_MASK)>>16) + ll*((cLL&RED_MASK)>>16) +
        ur*((cUR&RED_MASK)>>16) + lr*((cLR&RED_MASK)>>16))
       << PREC_RED_SHIFT) & RED_MASK;

  g = ((ul*(cUL&GREEN_MASK) + ll*(cLL&GREEN_MASK) +
        ur*(cUR&GREEN_MASK) + lr*(cLR&GREEN_MASK))
       >>> PRECISIONB) & GREEN_MASK;

  b = (ul*(cUL&BLUE_MASK) + ll*(cLL&BLUE_MASK) +
       ur*(cUR&BLUE_MASK) + lr*(cLR&BLUE_MASK))
      >>> PRECISIONB;

  a = ((ul*((cUL&ALPHA_MASK)>>>24) + ll*((cLL&ALPHA_MASK)>>>24) +
        ur*((cUR&ALPHA_MASK)>>>24) + lr*((cLR&ALPHA_MASK)>>>24))
       << PREC_ALPHA_SHIFT) & ALPHA_MASK;

  return a | r | g | b;
}
//////////////////////////////////////////////////////////////
// internal blending methods
/** Returns the smaller of two ints. */
private static int low(int a, int b) {
  return Math.min(a, b);
}
/** Returns the larger of two ints. */
private static int high(int a, int b) {
  return Math.max(a, b);
}
// davbol - added peg helper, equiv to constrain(n,0,255)
// davbol - added peg helper, equiv to constrain(n,0,255)
/** Clamps n to the byte range [0, 255]. */
private static int peg(int n) {
  return Math.max(0, Math.min(255, n));
}
/**
 * Fixed-point linear interpolation from a toward b, with f as an
 * 8-bit factor (0..255): a + (b - a) * f / 256.
 */
private static int mix(int a, int b, int f) {
  int span = b - a;
  return a + ((span * f) >> 8);
}
/////////////////////////////////////////////////////////////
// BLEND MODE IMPLEMENTATIONS
/**
 * BLEND mode: linear interpolation of each channel from dest (a)
 * toward src (b), weighted by the source alpha. Output alpha is the
 * sum of both alphas, clipped at 0xff.
 */
private static int blend_blend(int a, int b) {
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int outA = low(((a & ALPHA_MASK) >>> 24) + f, 0xff) << 24;
  int outR = mix(a & RED_MASK, b & RED_MASK, f) & RED_MASK;
  int outG = mix(a & GREEN_MASK, b & GREEN_MASK, f) & GREEN_MASK;
  int outB = mix(a & BLUE_MASK, b & BLUE_MASK, f);
  return outA | outR | outG | outB;
}
/**
* additive blend with clipping
*/
/**
 * additive blend with clipping
 * Adds the alpha-weighted source channel to the destination channel,
 * pinning each channel at its mask so it cannot carry into a neighbour.
 */
private static int blend_add_pin(int a, int b) {
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int outA = low(((a & ALPHA_MASK) >>> 24) + f, 0xff) << 24;
  int outR = low((a & RED_MASK) +
                 ((b & RED_MASK) >> 8) * f, RED_MASK) & RED_MASK;
  int outG = low((a & GREEN_MASK) +
                 ((b & GREEN_MASK) >> 8) * f, GREEN_MASK) & GREEN_MASK;
  int outB = low((a & BLUE_MASK) +
                 (((b & BLUE_MASK) * f) >> 8), BLUE_MASK);
  return outA | outR | outG | outB;
}
/**
* subtractive blend with clipping
*/
/**
 * subtractive blend with clipping
 * Subtracts the alpha-weighted source channel from the destination
 * channel, clipping at black. Note the deliberate trick: the red
 * difference is floored at GREEN_MASK and the green difference at
 * BLUE_MASK — any underflow below a channel's own bit range is forced
 * into lower bits, which the subsequent & RED_MASK / & GREEN_MASK
 * zeroes out. Do not "fix" these masks to match their channel.
 */
private static int blend_sub_pin(int a, int b) {
  int f = (b & ALPHA_MASK) >>> 24;
  return (low(((a & ALPHA_MASK) >>> 24) + f, 0xff) << 24 |
          high(((a & RED_MASK) - ((b & RED_MASK) >> 8) * f),
               GREEN_MASK) & RED_MASK |
          high(((a & GREEN_MASK) - ((b & GREEN_MASK) >> 8) * f),
               BLUE_MASK) & GREEN_MASK |
          high((a & BLUE_MASK) - (((b & BLUE_MASK) * f) >> 8), 0));
}
/**
* only returns the blended lightest colour
*/
/**
 * only returns the blended lightest colour
 * Per channel: the larger of the destination value and the
 * alpha-weighted source value.
 */
private static int blend_lightest(int a, int b) {
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int outA = low(((a & ALPHA_MASK) >>> 24) + f, 0xff) << 24;
  int outR = high(a & RED_MASK, ((b & RED_MASK) >> 8) * f) & RED_MASK;
  int outG = high(a & GREEN_MASK, ((b & GREEN_MASK) >> 8) * f) & GREEN_MASK;
  int outB = high(a & BLUE_MASK, ((b & BLUE_MASK) * f) >> 8);
  return outA | outR | outG | outB;
}
/**
* only returns the blended darkest colour
*/
/**
 * only returns the blended darkest colour
 * Per channel: interpolate (by source alpha) from the destination value
 * toward the smaller of the destination value and the alpha-weighted
 * source value.
 */
private static int blend_darkest(int a, int b) {
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  return (low(((a & ALPHA_MASK) >>> 24) + f, 0xff) << 24 |
          mix(a & RED_MASK,
              low(a & RED_MASK,
                  ((b & RED_MASK) >> 8) * f), f) & RED_MASK |
          mix(a & GREEN_MASK,
              low(a & GREEN_MASK,
                  ((b & GREEN_MASK) >> 8) * f), f) & GREEN_MASK |
          mix(a & BLUE_MASK,
              low(a & BLUE_MASK,
                  ((b & BLUE_MASK) * f) >> 8), f));
}
/**
* returns the absolute value of the difference of the input colors
* C = |A - B|
*/
/**
 * returns the absolute value of the difference of the input colors
 * C = |A - B|
 */
private static int blend_difference(int a, int b) {
  // unpack channels (a = destination/underlay, b = source/overlay)
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int dstR = (a & RED_MASK) >> 16;
  int dstG = (a & GREEN_MASK) >> 8;
  int dstB = a & BLUE_MASK;
  int srcR = (b & RED_MASK) >> 16;
  int srcG = (b & GREEN_MASK) >> 8;
  int srcB = b & BLUE_MASK;

  // formula: absolute channel difference
  int cr = Math.abs(dstR - srcR);
  int cg = Math.abs(dstG - srcG);
  int cb = Math.abs(dstB - srcB);

  // alpha blend toward the computed colour, clamped per channel
  int outA = low(((a & ALPHA_MASK) >>> 24) + f, 0xff);
  int outR = peg(dstR + (((cr - dstR) * f) >> 8));
  int outG = peg(dstG + (((cg - dstG) * f) >> 8));
  int outB = peg(dstB + (((cb - dstB) * f) >> 8));
  return outA << 24 | outR << 16 | outG << 8 | outB;
}
/**
* Cousin of difference, algorithm used here is based on a Lingo version
* found here: http://www.mediamacros.com/item/item-1006687616/
* (Not yet verified to be correct).
*/
/**
 * Cousin of difference, algorithm used here is based on a Lingo version
 * found here: http://www.mediamacros.com/item/item-1006687616/
 * (Not yet verified to be correct).
 */
private static int blend_exclusion(int a, int b) {
  // unpack channels (a = destination/underlay, b = source/overlay)
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int dstR = (a & RED_MASK) >> 16;
  int dstG = (a & GREEN_MASK) >> 8;
  int dstB = a & BLUE_MASK;
  int srcR = (b & RED_MASK) >> 16;
  int srcG = (b & GREEN_MASK) >> 8;
  int srcB = b & BLUE_MASK;

  // formula: A + B - 2AB (with >>7 approximating /128)
  int cr = dstR + srcR - ((dstR * srcR) >> 7);
  int cg = dstG + srcG - ((dstG * srcG) >> 7);
  int cb = dstB + srcB - ((dstB * srcB) >> 7);

  // alpha blend toward the computed colour, clamped per channel
  int outA = low(((a & ALPHA_MASK) >>> 24) + f, 0xff);
  int outR = peg(dstR + (((cr - dstR) * f) >> 8));
  int outG = peg(dstG + (((cg - dstG) * f) >> 8));
  int outB = peg(dstB + (((cb - dstB) * f) >> 8));
  return outA << 24 | outR << 16 | outG << 8 | outB;
}
/**
* returns the product of the input colors
* C = A * B
*/
/**
 * returns the product of the input colors
 * C = A * B
 */
private static int blend_multiply(int a, int b) {
  // unpack channels (a = destination/underlay, b = source/overlay)
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int dstR = (a & RED_MASK) >> 16;
  int dstG = (a & GREEN_MASK) >> 8;
  int dstB = a & BLUE_MASK;
  int srcR = (b & RED_MASK) >> 16;
  int srcG = (b & GREEN_MASK) >> 8;
  int srcB = b & BLUE_MASK;

  // formula: channel product, rescaled by >>8
  int cr = (dstR * srcR) >> 8;
  int cg = (dstG * srcG) >> 8;
  int cb = (dstB * srcB) >> 8;

  // alpha blend toward the computed colour, clamped per channel
  int outA = low(((a & ALPHA_MASK) >>> 24) + f, 0xff);
  int outR = peg(dstR + (((cr - dstR) * f) >> 8));
  int outG = peg(dstG + (((cg - dstG) * f) >> 8));
  int outB = peg(dstB + (((cb - dstB) * f) >> 8));
  return outA << 24 | outR << 16 | outG << 8 | outB;
}
/**
* returns the inverse of the product of the inverses of the input colors
* (the inverse of multiply). C = 1 - (1-A) * (1-B)
*/
/**
 * returns the inverse of the product of the inverses of the input colors
 * (the inverse of multiply). C = 1 - (1-A) * (1-B)
 */
private static int blend_screen(int a, int b) {
  // unpack channels (a = destination/underlay, b = source/overlay)
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int dstR = (a & RED_MASK) >> 16;
  int dstG = (a & GREEN_MASK) >> 8;
  int dstB = a & BLUE_MASK;
  int srcR = (b & RED_MASK) >> 16;
  int srcG = (b & GREEN_MASK) >> 8;
  int srcB = b & BLUE_MASK;

  // formula: inverted multiply of inverted channels
  int cr = 255 - (((255 - dstR) * (255 - srcR)) >> 8);
  int cg = 255 - (((255 - dstG) * (255 - srcG)) >> 8);
  int cb = 255 - (((255 - dstB) * (255 - srcB)) >> 8);

  // alpha blend toward the computed colour, clamped per channel
  int outA = low(((a & ALPHA_MASK) >>> 24) + f, 0xff);
  int outR = peg(dstR + (((cr - dstR) * f) >> 8));
  int outG = peg(dstG + (((cg - dstG) * f) >> 8));
  int outB = peg(dstB + (((cb - dstB) * f) >> 8));
  return outA << 24 | outR << 16 | outG << 8 | outB;
}
/**
* returns either multiply or screen for darker or lighter values of A
* (the inverse of hard light)
* C =
* A < 0.5 : 2 * A * B
* A >=0.5 : 1 - (2 * (255-A) * (255-B))
*/
/**
 * returns either multiply or screen for darker or lighter values of A
 * (the inverse of hard light)
 * C =
 * A < 0.5 : 2 * A * B
 * A >=0.5 : 1 - (2 * (255-A) * (255-B))
 */
private static int blend_overlay(int a, int b) {
  // unpack channels (a = destination/underlay, b = source/overlay)
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int dstR = (a & RED_MASK) >> 16;
  int dstG = (a & GREEN_MASK) >> 8;
  int dstB = a & BLUE_MASK;
  int srcR = (b & RED_MASK) >> 16;
  int srcG = (b & GREEN_MASK) >> 8;
  int srcB = b & BLUE_MASK;

  // formula: multiply for dark destination values, screen for light ones
  int cr = (dstR < 128) ? ((dstR * srcR) >> 7)
                        : (255 - (((255 - dstR) * (255 - srcR)) >> 7));
  int cg = (dstG < 128) ? ((dstG * srcG) >> 7)
                        : (255 - (((255 - dstG) * (255 - srcG)) >> 7));
  int cb = (dstB < 128) ? ((dstB * srcB) >> 7)
                        : (255 - (((255 - dstB) * (255 - srcB)) >> 7));

  // alpha blend toward the computed colour, clamped per channel
  int outA = low(((a & ALPHA_MASK) >>> 24) + f, 0xff);
  int outR = peg(dstR + (((cr - dstR) * f) >> 8));
  int outG = peg(dstG + (((cg - dstG) * f) >> 8));
  int outB = peg(dstB + (((cb - dstB) * f) >> 8));
  return outA << 24 | outR << 16 | outG << 8 | outB;
}
/**
* returns either multiply or screen for darker or lighter values of B
* (the inverse of overlay)
* C =
* B < 0.5 : 2 * A * B
* B >=0.5 : 1 - (2 * (255-A) * (255-B))
*/
/**
 * returns either multiply or screen for darker or lighter values of B
 * (the inverse of overlay)
 * C =
 * B < 0.5 : 2 * A * B
 * B >=0.5 : 1 - (2 * (255-A) * (255-B))
 */
private static int blend_hard_light(int a, int b) {
  // unpack channels (a = destination/underlay, b = source/overlay)
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int dstR = (a & RED_MASK) >> 16;
  int dstG = (a & GREEN_MASK) >> 8;
  int dstB = a & BLUE_MASK;
  int srcR = (b & RED_MASK) >> 16;
  int srcG = (b & GREEN_MASK) >> 8;
  int srcB = b & BLUE_MASK;

  // formula: like overlay, but the branch tests the SOURCE channel
  int cr = (srcR < 128) ? ((dstR * srcR) >> 7)
                        : (255 - (((255 - dstR) * (255 - srcR)) >> 7));
  int cg = (srcG < 128) ? ((dstG * srcG) >> 7)
                        : (255 - (((255 - dstG) * (255 - srcG)) >> 7));
  int cb = (srcB < 128) ? ((dstB * srcB) >> 7)
                        : (255 - (((255 - dstB) * (255 - srcB)) >> 7));

  // alpha blend toward the computed colour, clamped per channel
  int outA = low(((a & ALPHA_MASK) >>> 24) + f, 0xff);
  int outR = peg(dstR + (((cr - dstR) * f) >> 8));
  int outG = peg(dstG + (((cg - dstG) * f) >> 8));
  int outB = peg(dstB + (((cb - dstB) * f) >> 8));
  return outA << 24 | outR << 16 | outG << 8 | outB;
}
/**
* returns the inverse multiply plus screen, which simplifies to
* C = 2AB + A^2 - 2A^2B
*/
/**
 * returns the inverse multiply plus screen, which simplifies to
 * C = 2AB + A^2 - 2A^2B
 */
private static int blend_soft_light(int a, int b) {
  // unpack channels (a = destination/underlay, b = source/overlay)
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int dstR = (a & RED_MASK) >> 16;
  int dstG = (a & GREEN_MASK) >> 8;
  int dstB = a & BLUE_MASK;
  int srcR = (b & RED_MASK) >> 16;
  int srcG = (b & GREEN_MASK) >> 8;
  int srcB = b & BLUE_MASK;

  // formula: 2AB + A^2 - 2(A^2)B, via fixed-point shifts
  int cr = ((dstR * srcR) >> 7) + ((dstR * dstR) >> 8) - ((dstR * dstR * srcR) >> 15);
  int cg = ((dstG * srcG) >> 7) + ((dstG * dstG) >> 8) - ((dstG * dstG * srcG) >> 15);
  int cb = ((dstB * srcB) >> 7) + ((dstB * dstB) >> 8) - ((dstB * dstB * srcB) >> 15);

  // alpha blend toward the computed colour, clamped per channel
  int outA = low(((a & ALPHA_MASK) >>> 24) + f, 0xff);
  int outR = peg(dstR + (((cr - dstR) * f) >> 8));
  int outG = peg(dstG + (((cg - dstG) * f) >> 8));
  int outB = peg(dstB + (((cb - dstB) * f) >> 8));
  return outA << 24 | outR << 16 | outG << 8 | outB;
}
/**
* Returns the first (underlay) color divided by the inverse of
* the second (overlay) color. C = A / (255-B)
*/
/**
 * Returns the first (underlay) color divided by the inverse of
 * the second (overlay) color. C = A / (255-B)
 * Each quotient is peg()ed BEFORE the final alpha mix because the
 * division can exceed 255 by a wide margin; the 255 denominator case
 * is special-cased to avoid divide-by-zero.
 */
private static int blend_dodge(int a, int b) {
  // setup (this portion will always be the same)
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int ar = (a & RED_MASK) >> 16;
  int ag = (a & GREEN_MASK) >> 8;
  int ab = (a & BLUE_MASK);
  int br = (b & RED_MASK) >> 16;
  int bg = (b & GREEN_MASK) >> 8;
  int bb = (b & BLUE_MASK);
  // formula:
  int cr = (br==255) ? 255 : peg((ar << 8) / (255 - br)); // division requires pre-peg()-ing
  int cg = (bg==255) ? 255 : peg((ag << 8) / (255 - bg)); // "
  int cb = (bb==255) ? 255 : peg((ab << 8) / (255 - bb)); // "
  // alpha blend (this portion will always be the same)
  return (low(((a & ALPHA_MASK) >>> 24) + f, 0xff) << 24 |
          (peg(ar + (((cr - ar) * f) >> 8)) << 16) |
          (peg(ag + (((cg - ag) * f) >> 8)) << 8) |
          (peg(ab + (((cb - ab) * f) >> 8)) ) );
}
/**
* returns the inverse of the inverse of the first (underlay) color
* divided by the second (overlay) color. C = 255 - (255-A) / B
*/
/**
 * returns the inverse of the inverse of the first (underlay) color
 * divided by the second (overlay) color. C = 255 - (255-A) / B
 * As with blend_dodge, the quotient is peg()ed before the final alpha
 * mix, and a zero denominator is special-cased.
 */
private static int blend_burn(int a, int b) {
  // setup (this portion will always be the same)
  int f = (b & ALPHA_MASK) >>> 24;  // source alpha = blend factor
  int ar = (a & RED_MASK) >> 16;
  int ag = (a & GREEN_MASK) >> 8;
  int ab = (a & BLUE_MASK);
  int br = (b & RED_MASK) >> 16;
  int bg = (b & GREEN_MASK) >> 8;
  int bb = (b & BLUE_MASK);
  // formula:
  int cr = (br==0) ? 0 : 255 - peg(((255 - ar) << 8) / br); // division requires pre-peg()-ing
  int cg = (bg==0) ? 0 : 255 - peg(((255 - ag) << 8) / bg); // "
  int cb = (bb==0) ? 0 : 255 - peg(((255 - ab) << 8) / bb); // "
  // alpha blend (this portion will always be the same)
  return (low(((a & ALPHA_MASK) >>> 24) + f, 0xff) << 24 |
          (peg(ar + (((cr - ar) * f) >> 8)) << 16) |
          (peg(ag + (((cg - ag) * f) >> 8)) << 8) |
          (peg(ab + (((cb - ab) * f) >> 8)) ) );
}
//////////////////////////////////////////////////////////////
// FILE I/O
// Template for the minimal big-endian ("MM") TIFF header that
// saveTIFF() emits and loadTIFF() expects. The variable fields —
// width (bytes 30/31), height (bytes 42/43 and 102/103), and the
// pixel-data byte count (bytes 114-117) — are patched in at save time;
// every other byte must match exactly on load.
static byte TIFF_HEADER[] = {
  77, 77, 0, 42, 0, 0, 0, 8, 0, 9, 0, -2, 0, 4, 0, 0, 0, 1, 0, 0,
  0, 0, 1, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 3, 0, 0, 0, 1,
  0, 0, 0, 0, 1, 2, 0, 3, 0, 0, 0, 3, 0, 0, 0, 122, 1, 6, 0, 3, 0,
  0, 0, 1, 0, 2, 0, 0, 1, 17, 0, 4, 0, 0, 0, 1, 0, 0, 3, 0, 1, 21,
  0, 3, 0, 0, 0, 1, 0, 3, 0, 0, 1, 22, 0, 3, 0, 0, 0, 1, 0, 0, 0, 0,
  1, 23, 0, 4, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 8, 0, 8
};

// Shown when a TIFF file doesn't match the header layout written above.
static final String TIFF_ERROR =
  "Error: Processing can only read its own TIFF files.";
/**
 * Parse a TIFF byte stream previously written by saveTIFF(). Only the
 * exact header layout in TIFF_HEADER is accepted; anything else prints
 * TIFF_ERROR and returns null. Pixel data is 24-bit RGB starting at
 * byte 768, repacked as opaque ARGB.
 */
static protected PImage loadTIFF(byte tiff[]) {
  if ((tiff[42] != tiff[102]) ||  // width/height in both places
      (tiff[43] != tiff[103])) {
    System.err.println(TIFF_ERROR);
    return null;
  }

  // big-endian 16-bit width/height from their fixed header offsets
  int width =
    ((tiff[30] & 0xff) << 8) | (tiff[31] & 0xff);
  int height =
    ((tiff[42] & 0xff) << 8) | (tiff[43] & 0xff);

  // big-endian 32-bit byte count of the pixel data
  int count =
    ((tiff[114] & 0xff) << 24) |
    ((tiff[115] & 0xff) << 16) |
    ((tiff[116] & 0xff) << 8) |
    (tiff[117] & 0xff);
  if (count != width * height * 3) {  // must be 3 bytes per pixel (RGB)
    System.err.println(TIFF_ERROR + " (" + width + ", " + height +")");
    return null;
  }

  // check the rest of the header, skipping the variable fields
  for (int i = 0; i < TIFF_HEADER.length; i++) {
    if ((i == 30) || (i == 31) || (i == 42) || (i == 43) ||
        (i == 102) || (i == 103) ||
        (i == 114) || (i == 115) || (i == 116) || (i == 117)) continue;

    if (tiff[i] != TIFF_HEADER[i]) {
      System.err.println(TIFF_ERROR + " (" + i + ")");
      return null;
    }
  }

  PImage outgoing = new PImage(width, height, RGB);
  int index = 768;  // pixel data starts right after the 768-byte header
  count /= 3;       // now a pixel count rather than a byte count
  for (int i = 0; i < count; i++) {
    outgoing.pixels[i] =
      0xFF000000 |  // force opaque alpha
      (tiff[index++] & 0xff) << 16 |
      (tiff[index++] & 0xff) << 8 |
      (tiff[index++] & 0xff);
  }
  return outgoing;
}
/**
 * Write this image's pixels to the stream as a minimal big-endian TIFF
 * (the layout in TIFF_HEADER). Only RGB is written — alpha is dropped.
 * Returns true on success; on IOException prints the trace and
 * returns false. The stream is flushed but not closed here.
 */
protected boolean saveTIFF(OutputStream output) {
  // shutting off the warning, people can figure this out themselves
  /*
  if (format != RGB) {
  System.err.println("Warning: only RGB information is saved with " +
  ".tif files. Use .tga or .png for ARGB images and others.");
  }
  */
  try {
    byte tiff[] = new byte[768];
    System.arraycopy(TIFF_HEADER, 0, tiff, 0, TIFF_HEADER.length);

    // patch in big-endian width, height (twice), and pixel byte count
    tiff[30] = (byte) ((width >> 8) & 0xff);
    tiff[31] = (byte) ((width) & 0xff);
    tiff[42] = tiff[102] = (byte) ((height >> 8) & 0xff);
    tiff[43] = tiff[103] = (byte) ((height) & 0xff);

    int count = width*height*3;
    tiff[114] = (byte) ((count >> 24) & 0xff);
    tiff[115] = (byte) ((count >> 16) & 0xff);
    tiff[116] = (byte) ((count >> 8) & 0xff);
    tiff[117] = (byte) ((count) & 0xff);

    // spew the header to the disk
    output.write(tiff);

    // then the pixel data, 3 bytes (R, G, B) per pixel
    for (int i = 0; i < pixels.length; i++) {
      output.write((pixels[i] >> 16) & 0xff);
      output.write((pixels[i] >> 8) & 0xff);
      output.write(pixels[i] & 0xff);
    }
    output.flush();
    return true;

  } catch (IOException e) {
    e.printStackTrace();
  }
  return false;
}
/**
 * Saves the image to a TIFF, TARGA, PNG, or JPEG file.
 *
 * As of revision 0100, this function requires an absolute path,
 * in order to avoid confusion. To save inside the sketch folder,
 * use the function savePath() from PApplet, or use saveFrame() instead.
 * As of revision 0116, savePath() is not needed if this object has been
 * created (as recommended) via createImage() or createGraphics() or
 * one of its neighbors.
 *
 * As of revision 0115, when using Java 1.4 and later, you can write
 * to several formats besides tga and tiff. If Java 1.4 is installed
 * and the extension used is supported (usually png, jpg, jpeg, bmp,
 * and tiff), then those methods will be used to write the image.
 * To get a list of the supported formats for writing, use:
 * println(javax.imageio.ImageIO.getReaderFormatNames())
 *
 * To use the original built-in image writers, use .tga or .tif as the
 * extension, or don't include an extension. When no extension is used,
 * the extension .tif will be added to the file name.
 *
 * The ImageIO API claims to support wbmp files, however they probably
 * require a black and white image. Basic testing produced a zero-length
 * file with no error.
 *
 * @webref pimage:method
 * @brief Saves the image to a TIFF, TARGA, PNG, or JPEG file
 * @usage application
 * @param filename a sequence of letters and numbers
 */
public boolean save(String filename) {  // ignore
  boolean success = false;

  if (parent != null) {
    // use savePath(), so that the intermediate directories are created
    filename = parent.savePath(filename);

  } else {
    File file = new File(filename);
    if (file.isAbsolute()) {
      // make sure that the intermediate folders have been created
      PApplet.createPath(file);
    } else {
      String msg =
        "PImage.save() requires an absolute path. " +
        "Use createImage(), or pass savePath() to save().";
      PGraphics.showException(msg);
    }
  }

  // Make sure the pixel data is ready to go
  loadPixels();

  try {
    OutputStream os = null;

    // Prefer ImageIO when the extension matches one of its writers.
    if (saveImageFormats == null) {
      saveImageFormats = javax.imageio.ImageIO.getWriterFormatNames();
    }
    if (saveImageFormats != null) {
      for (int i = 0; i < saveImageFormats.length; i++) {
        if (filename.endsWith("." + saveImageFormats[i])) {
          if (!saveImageIO(filename)) {
            System.err.println("Error while saving image.");
            return false;
          }
          return true;
        }
      }
    }

    if (filename.toLowerCase().endsWith(".tga")) {
      os = new BufferedOutputStream(new FileOutputStream(filename), 32768);
      success = saveTGA(os); //, pixels, width, height, format);

    } else {
      if (!filename.toLowerCase().endsWith(".tif") &&
          !filename.toLowerCase().endsWith(".tiff")) {
        // if no .tif extension, add it..
        filename += ".tif";
      }
      os = new BufferedOutputStream(new FileOutputStream(filename), 32768);
      success = saveTIFF(os); //, pixels, width, height);
    }
    os.flush();
    os.close();

  } catch (IOException e) {
    System.err.println("Error while saving image.");
    e.printStackTrace();
    success = false;
  }
  return success;
}
}